2024-12-05 00:23:40,136 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-12-05 00:23:40,147 main DEBUG Took 0.009856 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-05 00:23:40,148 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-05 00:23:40,148 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-05 00:23:40,149 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-05 00:23:40,150 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:23:40,157 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-05 00:23:40,168 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:23:40,169 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:23:40,170 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:23:40,170 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:23:40,171 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:23:40,171 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:23:40,172 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:23:40,172 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:23:40,172 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:23:40,172 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:23:40,173 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:23:40,173 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:23:40,174 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:23:40,174 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-05 00:23:40,175 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:23:40,175 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:23:40,175 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:23:40,175 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:23:40,176 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:23:40,176 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:23:40,177 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:23:40,177 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:23:40,177 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:23:40,177 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:23:40,178 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:23:40,178 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-05 00:23:40,179 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:23:40,181 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-05 00:23:40,182 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-05 00:23:40,183 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-05 00:23:40,184 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-05 00:23:40,184 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-05 00:23:40,192 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-05 00:23:40,194 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-05 00:23:40,196 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-05 00:23:40,196 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-05 00:23:40,197 main DEBUG createAppenders(={Console}) 2024-12-05 00:23:40,198 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-12-05 00:23:40,198 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-12-05 00:23:40,198 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-12-05 00:23:40,199 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-05 00:23:40,199 main DEBUG OutputStream closed 2024-12-05 00:23:40,199 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-05 00:23:40,199 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-05 00:23:40,200 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-12-05 00:23:40,265 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-05 00:23:40,267 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-05 00:23:40,268 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-05 00:23:40,269 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-05 00:23:40,269 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-05 00:23:40,270 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-05 00:23:40,270 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-05 00:23:40,270 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-05 00:23:40,271 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-05 00:23:40,271 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-05 00:23:40,271 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-05 00:23:40,271 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-05 00:23:40,272 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-05 00:23:40,272 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-05 00:23:40,272 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-05 00:23:40,273 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-05 00:23:40,273 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-05 00:23:40,274 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-05 00:23:40,276 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-05 00:23:40,276 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-12-05 00:23:40,277 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-05 00:23:40,277 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-12-05T00:23:40,507 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc 2024-12-05 00:23:40,510 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-05 00:23:40,510 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-05T00:23:40,522 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-12-05T00:23:40,558 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=320, ProcessCount=11, AvailableMemoryMB=9361 2024-12-05T00:23:40,561 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-05T00:23:40,579 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/cluster_a63b60db-13ec-cfe8-9570-22099ec66976, deleteOnExit=true 2024-12-05T00:23:40,579 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-05T00:23:40,580 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/test.cache.data in system properties and HBase conf 2024-12-05T00:23:40,581 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/hadoop.tmp.dir in system properties and HBase conf 2024-12-05T00:23:40,582 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/hadoop.log.dir in system properties and HBase conf 2024-12-05T00:23:40,582 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-05T00:23:40,583 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-05T00:23:40,583 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-05T00:23:40,684 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-05T00:23:40,792 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-05T00:23:40,795 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-05T00:23:40,796 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-05T00:23:40,796 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-05T00:23:40,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T00:23:40,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-05T00:23:40,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-05T00:23:40,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T00:23:40,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T00:23:40,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-05T00:23:40,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/nfs.dump.dir in system properties and HBase conf 2024-12-05T00:23:40,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/java.io.tmpdir in system properties and HBase conf 2024-12-05T00:23:40,800 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T00:23:40,800 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-05T00:23:40,801 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-05T00:23:41,293 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-05T00:23:41,625 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-05T00:23:41,709 INFO [Time-limited test {}] log.Log(170): Logging initialized @2335ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-05T00:23:41,789 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:23:41,856 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:23:41,878 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:23:41,878 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:23:41,880 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T00:23:41,896 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:23:41,898 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:23:41,899 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:23:42,108 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/java.io.tmpdir/jetty-localhost-38351-hadoop-hdfs-3_4_1-tests_jar-_-any-8668848711383641218/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-05T00:23:42,115 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:38351} 2024-12-05T00:23:42,115 INFO [Time-limited test {}] server.Server(415): Started @2743ms 2024-12-05T00:23:42,141 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-05T00:23:42,508 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:23:42,516 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:23:42,517 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:23:42,517 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:23:42,517 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T00:23:42,518 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:23:42,519 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:23:42,639 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/java.io.tmpdir/jetty-localhost-33353-hadoop-hdfs-3_4_1-tests_jar-_-any-11416605953484239314/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:23:42,640 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:33353} 2024-12-05T00:23:42,640 INFO [Time-limited test {}] server.Server(415): Started @3267ms 2024-12-05T00:23:42,700 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T00:23:42,835 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:23:42,842 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:23:42,844 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:23:42,844 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:23:42,845 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T00:23:42,846 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:23:42,846 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:23:42,966 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/java.io.tmpdir/jetty-localhost-37361-hadoop-hdfs-3_4_1-tests_jar-_-any-9379006139687382458/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:23:42,967 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:37361} 2024-12-05T00:23:42,967 INFO [Time-limited test {}] server.Server(415): Started @3594ms 2024-12-05T00:23:42,969 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-05T00:23:43,190 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/cluster_a63b60db-13ec-cfe8-9570-22099ec66976/data/data2/current/BP-1977460799-172.17.0.2-1733358221383/current, will proceed with Du for space computation calculation, 2024-12-05T00:23:43,190 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/cluster_a63b60db-13ec-cfe8-9570-22099ec66976/data/data1/current/BP-1977460799-172.17.0.2-1733358221383/current, will proceed with Du for space computation calculation, 2024-12-05T00:23:43,190 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/cluster_a63b60db-13ec-cfe8-9570-22099ec66976/data/data4/current/BP-1977460799-172.17.0.2-1733358221383/current, will proceed with Du for space computation calculation, 2024-12-05T00:23:43,190 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/cluster_a63b60db-13ec-cfe8-9570-22099ec66976/data/data3/current/BP-1977460799-172.17.0.2-1733358221383/current, will proceed with Du for space computation calculation, 2024-12-05T00:23:43,254 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-05T00:23:43,259 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-05T00:23:43,346 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe889b9d6f5a452f5 with lease ID 0xf1ad42d71b002996: Processing first storage report for DS-a4e66060-ab46-4fa0-935e-7ea2399c198a from datanode DatanodeRegistration(127.0.0.1:40023, datanodeUuid=fe788050-3610-4b50-a546-99e744c8ae07, infoPort=36099, infoSecurePort=0, ipcPort=35609, storageInfo=lv=-57;cid=testClusterID;nsid=10476316;c=1733358221383) 2024-12-05T00:23:43,347 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe889b9d6f5a452f5 with lease ID 0xf1ad42d71b002996: from storage DS-a4e66060-ab46-4fa0-935e-7ea2399c198a node DatanodeRegistration(127.0.0.1:40023, datanodeUuid=fe788050-3610-4b50-a546-99e744c8ae07, infoPort=36099, infoSecurePort=0, ipcPort=35609, storageInfo=lv=-57;cid=testClusterID;nsid=10476316;c=1733358221383), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-05T00:23:43,348 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8cca56aaf861e605 with lease ID 0xf1ad42d71b002997: Processing first storage report for DS-b2e319e7-7be9-4588-9599-ac2f1d57f49d from datanode DatanodeRegistration(127.0.0.1:38077, datanodeUuid=2553d511-29bd-451d-987a-25b7a501f835, infoPort=38573, infoSecurePort=0, ipcPort=39733, storageInfo=lv=-57;cid=testClusterID;nsid=10476316;c=1733358221383) 2024-12-05T00:23:43,348 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8cca56aaf861e605 with lease ID 0xf1ad42d71b002997: from storage DS-b2e319e7-7be9-4588-9599-ac2f1d57f49d node DatanodeRegistration(127.0.0.1:38077, datanodeUuid=2553d511-29bd-451d-987a-25b7a501f835, infoPort=38573, infoSecurePort=0, ipcPort=39733, storageInfo=lv=-57;cid=testClusterID;nsid=10476316;c=1733358221383), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-05T00:23:43,348 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe889b9d6f5a452f5 with lease ID 0xf1ad42d71b002996: Processing first storage report for DS-2f0db98d-84bd-40b5-acec-8a57b825d67d from datanode DatanodeRegistration(127.0.0.1:40023, datanodeUuid=fe788050-3610-4b50-a546-99e744c8ae07, infoPort=36099, infoSecurePort=0, ipcPort=35609, storageInfo=lv=-57;cid=testClusterID;nsid=10476316;c=1733358221383) 2024-12-05T00:23:43,349 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe889b9d6f5a452f5 with lease ID 0xf1ad42d71b002996: from storage DS-2f0db98d-84bd-40b5-acec-8a57b825d67d node DatanodeRegistration(127.0.0.1:40023, datanodeUuid=fe788050-3610-4b50-a546-99e744c8ae07, infoPort=36099, infoSecurePort=0, ipcPort=35609, storageInfo=lv=-57;cid=testClusterID;nsid=10476316;c=1733358221383), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:23:43,349 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8cca56aaf861e605 with lease ID 0xf1ad42d71b002997: Processing first storage report for DS-4f738931-8727-483c-b9e8-526c42be4afd from datanode DatanodeRegistration(127.0.0.1:38077, datanodeUuid=2553d511-29bd-451d-987a-25b7a501f835, infoPort=38573, infoSecurePort=0, ipcPort=39733, storageInfo=lv=-57;cid=testClusterID;nsid=10476316;c=1733358221383) 2024-12-05T00:23:43,349 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8cca56aaf861e605 
with lease ID 0xf1ad42d71b002997: from storage DS-4f738931-8727-483c-b9e8-526c42be4afd node DatanodeRegistration(127.0.0.1:38077, datanodeUuid=2553d511-29bd-451d-987a-25b7a501f835, infoPort=38573, infoSecurePort=0, ipcPort=39733, storageInfo=lv=-57;cid=testClusterID;nsid=10476316;c=1733358221383), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:23:43,410 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc 2024-12-05T00:23:43,490 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/cluster_a63b60db-13ec-cfe8-9570-22099ec66976/zookeeper_0, clientPort=51053, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/cluster_a63b60db-13ec-cfe8-9570-22099ec66976/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/cluster_a63b60db-13ec-cfe8-9570-22099ec66976/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-05T00:23:43,499 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51053 2024-12-05T00:23:43,512 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:23:43,516 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:23:43,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741825_1001 (size=7) 2024-12-05T00:23:43,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741825_1001 (size=7) 2024-12-05T00:23:44,184 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37 with version=8 2024-12-05T00:23:44,185 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/hbase-staging 2024-12-05T00:23:44,276 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-05T00:23:44,524 INFO [Time-limited test {}] client.ConnectionUtils(128): master/2113c16e5528:0 server-side Connection retries=45 2024-12-05T00:23:44,535 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:23:44,535 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T00:23:44,539 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T00:23:44,539 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:23:44,540 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T00:23:44,683 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-05T00:23:44,749 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-05T00:23:44,762 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-05T00:23:44,767 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T00:23:44,799 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 7632 (auto-detected) 2024-12-05T00:23:44,800 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-05T00:23:44,821 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35931 2024-12-05T00:23:44,842 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35931 connecting to ZooKeeper ensemble=127.0.0.1:51053 2024-12-05T00:23:44,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:359310x0, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T00:23:44,885 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35931-0x10180016afe0000 connected 2024-12-05T00:23:44,916 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:23:44,918 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:23:44,928 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:23:44,932 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37, hbase.cluster.distributed=false 2024-12-05T00:23:44,956 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T00:23:44,960 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35931 
2024-12-05T00:23:44,961 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35931 2024-12-05T00:23:44,961 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35931 2024-12-05T00:23:44,962 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35931 2024-12-05T00:23:44,963 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35931 2024-12-05T00:23:45,086 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2113c16e5528:0 server-side Connection retries=45 2024-12-05T00:23:45,088 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:23:45,088 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T00:23:45,088 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T00:23:45,088 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:23:45,089 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T00:23:45,092 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T00:23:45,094 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T00:23:45,095 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36827 2024-12-05T00:23:45,096 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36827 connecting to ZooKeeper ensemble=127.0.0.1:51053 2024-12-05T00:23:45,098 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:23:45,101 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:23:45,108 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:368270x0, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T00:23:45,108 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36827-0x10180016afe0001 connected 2024-12-05T00:23:45,110 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36827-0x10180016afe0001, quorum=127.0.0.1:51053, baseZNode=/hbase Set watcher on 
znode that does not yet exist, /hbase/running 2024-12-05T00:23:45,114 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-05T00:23:45,121 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T00:23:45,124 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36827-0x10180016afe0001, quorum=127.0.0.1:51053, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T00:23:45,129 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36827-0x10180016afe0001, quorum=127.0.0.1:51053, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T00:23:45,130 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36827 2024-12-05T00:23:45,130 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36827 2024-12-05T00:23:45,130 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36827 2024-12-05T00:23:45,132 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36827 2024-12-05T00:23:45,132 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36827 2024-12-05T00:23:45,148 DEBUG [M:0;2113c16e5528:35931 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;2113c16e5528:35931 2024-12-05T00:23:45,149 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/2113c16e5528,35931,1733358224328 2024-12-05T00:23:45,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:23:45,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36827-0x10180016afe0001, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:23:45,159 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/2113c16e5528,35931,1733358224328 2024-12-05T00:23:45,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36827-0x10180016afe0001, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T00:23:45,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:23:45,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36827-0x10180016afe0001, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:23:45,181 DEBUG 
[master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-05T00:23:45,182 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/2113c16e5528,35931,1733358224328 from backup master directory 2024-12-05T00:23:45,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/2113c16e5528,35931,1733358224328 2024-12-05T00:23:45,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36827-0x10180016afe0001, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:23:45,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:23:45,187 WARN [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T00:23:45,187 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=2113c16e5528,35931,1733358224328 2024-12-05T00:23:45,190 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-05T00:23:45,192 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-05T00:23:45,249 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/hbase.id] with ID: 3fb41781-a778-4b60-9ba3-ff65aa541172 2024-12-05T00:23:45,250 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/.tmp/hbase.id 2024-12-05T00:23:45,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741826_1002 (size=42) 2024-12-05T00:23:45,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741826_1002 (size=42) 2024-12-05T00:23:45,262 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/.tmp/hbase.id]:[hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/hbase.id] 2024-12-05T00:23:45,309 INFO [master/2113c16e5528:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:23:45,315 INFO [master/2113c16e5528:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-12-05T00:23:45,337 INFO [master/2113c16e5528:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 20ms. 2024-12-05T00:23:45,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:23:45,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36827-0x10180016afe0001, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:23:45,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741827_1003 (size=196) 2024-12-05T00:23:45,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741827_1003 (size=196) 2024-12-05T00:23:45,381 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T00:23:45,383 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-05T00:23:45,389 INFO [master/2113c16e5528:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T00:23:45,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741828_1004 (size=1189) 2024-12-05T00:23:45,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741828_1004 (size=1189) 2024-12-05T00:23:45,443 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/MasterData/data/master/store 2024-12-05T00:23:45,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741829_1005 (size=34) 2024-12-05T00:23:45,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741829_1005 (size=34) 2024-12-05T00:23:45,470 INFO [master/2113c16e5528:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-05T00:23:45,473 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:23:45,475 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T00:23:45,475 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:23:45,475 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:23:45,476 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T00:23:45,477 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:23:45,477 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-05T00:23:45,478 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733358225474Disabling compacts and flushes for region at 1733358225474Disabling writes for close at 1733358225477 (+3 ms)Writing region close event to WAL at 1733358225477Closed at 1733358225477 2024-12-05T00:23:45,480 WARN [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/MasterData/data/master/store/.initializing 2024-12-05T00:23:45,480 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/MasterData/WALs/2113c16e5528,35931,1733358224328 2024-12-05T00:23:45,502 INFO [master/2113c16e5528:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2113c16e5528%2C35931%2C1733358224328, suffix=, logDir=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/MasterData/WALs/2113c16e5528,35931,1733358224328, archiveDir=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/MasterData/oldWALs, maxLogs=10 2024-12-05T00:23:45,511 INFO [master/2113c16e5528:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C35931%2C1733358224328.1733358225507 2024-12-05T00:23:45,529 INFO [master/2113c16e5528:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/MasterData/WALs/2113c16e5528,35931,1733358224328/2113c16e5528%2C35931%2C1733358224328.1733358225507 2024-12-05T00:23:45,539 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38573:38573),(127.0.0.1/127.0.0.1:36099:36099)] 2024-12-05T00:23:45,541 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-05T00:23:45,541 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:23:45,544 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:23:45,545 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:23:45,581 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:23:45,606 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-05T00:23:45,610 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:23:45,613 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:23:45,613 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:23:45,616 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-05T00:23:45,617 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:23:45,618 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:23:45,618 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:23:45,620 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-05T00:23:45,620 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:23:45,621 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:23:45,622 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:23:45,624 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-05T00:23:45,624 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:23:45,625 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:23:45,626 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:23:45,629 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:23:45,630 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:23:45,635 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:23:45,636 DEBUG [master/2113c16e5528:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:23:45,639 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T00:23:45,642 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:23:45,647 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T00:23:45,648 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=753462, jitterRate=-0.04192458093166351}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T00:23:45,654 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733358225557Initializing all the Stores at 1733358225559 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358225559Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358225560 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358225560Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358225560Cleaning up temporary data from old regions at 1733358225636 (+76 ms)Region opened successfully at 1733358225654 (+18 ms) 2024-12-05T00:23:45,655 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-05T00:23:45,690 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d6a3da7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2113c16e5528/172.17.0.2:0 2024-12-05T00:23:45,721 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-05T00:23:45,733 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-05T00:23:45,733 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-05T00:23:45,736 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-05T00:23:45,737 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-05T00:23:45,742 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-05T00:23:45,742 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-05T00:23:45,767 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-05T00:23:45,777 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-05T00:23:45,779 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-05T00:23:45,781 INFO [master/2113c16e5528:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-05T00:23:45,783 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-05T00:23:45,784 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-05T00:23:45,787 INFO [master/2113c16e5528:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-05T00:23:45,790 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-05T00:23:45,792 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-05T00:23:45,794 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-05T00:23:45,795 DEBUG 
[master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-05T00:23:45,813 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-05T00:23:45,814 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-05T00:23:45,818 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T00:23:45,818 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36827-0x10180016afe0001, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T00:23:45,818 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:23:45,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36827-0x10180016afe0001, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:23:45,821 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=2113c16e5528,35931,1733358224328, sessionid=0x10180016afe0000, setting cluster-up flag (Was=false) 2024-12-05T00:23:45,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:23:45,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36827-0x10180016afe0001, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:23:45,844 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-05T00:23:45,846 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2113c16e5528,35931,1733358224328 2024-12-05T00:23:45,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:23:45,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36827-0x10180016afe0001, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:23:45,858 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-05T00:23:45,860 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2113c16e5528,35931,1733358224328 2024-12-05T00:23:45,866 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-05T00:23:45,937 INFO [RS:0;2113c16e5528:36827 {}] regionserver.HRegionServer(746): ClusterId : 3fb41781-a778-4b60-9ba3-ff65aa541172 2024-12-05T00:23:45,938 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-05T00:23:45,939 DEBUG [RS:0;2113c16e5528:36827 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T00:23:45,944 DEBUG [RS:0;2113c16e5528:36827 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T00:23:45,945 DEBUG [RS:0;2113c16e5528:36827 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T00:23:45,948 DEBUG [RS:0;2113c16e5528:36827 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T00:23:45,948 DEBUG [RS:0;2113c16e5528:36827 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a857b59, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2113c16e5528/172.17.0.2:0 2024-12-05T00:23:45,948 INFO [master/2113c16e5528:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-05T00:23:45,955 INFO [master/2113c16e5528:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-05T00:23:45,961 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 2113c16e5528,35931,1733358224328 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-05T00:23:45,964 DEBUG [RS:0;2113c16e5528:36827 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;2113c16e5528:36827 2024-12-05T00:23:45,967 INFO [RS:0;2113c16e5528:36827 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T00:23:45,967 INFO [RS:0;2113c16e5528:36827 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T00:23:45,967 DEBUG [RS:0;2113c16e5528:36827 {}] regionserver.HRegionServer(832): About to register with Master. 
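Editor's note: the StochasticLoadBalancer line above reports maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false. If a test needed to tune these, they are plain Configuration keys; a hedged sketch using the commonly documented key names (verify against the HBase version in use), with values simply mirroring what was logged:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static Configuration tune() {
        Configuration conf = HBaseConfiguration.create();
        // Cap on candidate moves the stochastic search will evaluate per run.
        conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
        conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
        // Steps scale with region count; 800 per region as logged.
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        // Wall-clock budget for one balancer run, in milliseconds.
        conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30_000);
        // isByTable=false above corresponds to per-cluster (not per-table) balancing.
        conf.setBoolean("hbase.master.loadbalance.bytable", false);
        return conf;
      }
    }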
2024-12-05T00:23:45,968 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:23:45,969 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:23:45,969 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:23:45,969 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:23:45,969 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/2113c16e5528:0, corePoolSize=10, maxPoolSize=10 2024-12-05T00:23:45,969 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:23:45,970 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/2113c16e5528:0, corePoolSize=2, maxPoolSize=2 2024-12-05T00:23:45,970 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:23:45,970 INFO [RS:0;2113c16e5528:36827 {}] regionserver.HRegionServer(2659): reportForDuty to master=2113c16e5528,35931,1733358224328 with port=36827, startcode=1733358225045 2024-12-05T00:23:45,973 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733358255973 2024-12-05T00:23:45,974 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:23:45,975 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-05T00:23:45,975 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-05T00:23:45,976 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-05T00:23:45,981 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-05T00:23:45,981 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:23:45,981 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-05T00:23:45,981 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-05T00:23:45,981 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-05T00:23:45,981 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-05T00:23:45,984 DEBUG [RS:0;2113c16e5528:36827 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T00:23:45,983 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
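Editor's note: the cleaner chores initialized above (TimeToLiveLogCleaner, ReplicationLogCleaner, the master-local-store WAL/procedure-WAL cleaners, and the LogsCleaner chore) are pluggable delegates selected through configuration; the hfile_cleaner pool that starts just below has a matching key. A small illustrative sketch, assuming the stock plugin keys; class lists and the TTL value are examples, not a recommendation:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CleanerChoreSketch {
      public static Configuration configure() {
        Configuration conf = HBaseConfiguration.create();
        // Comma-separated delegate classes for the WAL (log) cleaner chore.
        conf.set("hbase.master.logcleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner");
        // TimeToLiveLogCleaner retention for archived WALs, in milliseconds (10 minutes here).
        conf.setLong("hbase.master.logcleaner.ttl", 600_000L);
        // Delegate classes for the HFile cleaner chore initialized further down in the log.
        conf.set("hbase.master.hfilecleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner");
        return conf;
      }
    }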
2024-12-05T00:23:45,986 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-05T00:23:45,987 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-05T00:23:45,988 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-05T00:23:45,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741831_1007 (size=1321) 2024-12-05T00:23:45,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741831_1007 (size=1321) 2024-12-05T00:23:45,996 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-05T00:23:45,996 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37 2024-12-05T00:23:46,000 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-05T00:23:46,001 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-05T00:23:46,003 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.large.0-1733358226002,5,FailOnTimeoutGroup] 2024-12-05T00:23:46,006 DEBUG 
[master/2113c16e5528:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.small.0-1733358226003,5,FailOnTimeoutGroup] 2024-12-05T00:23:46,006 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T00:23:46,006 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-05T00:23:46,008 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-05T00:23:46,008 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-05T00:23:46,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741832_1008 (size=32) 2024-12-05T00:23:46,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741832_1008 (size=32) 2024-12-05T00:23:46,020 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:23:46,025 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T00:23:46,028 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T00:23:46,028 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:23:46,030 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:23:46,030 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T00:23:46,033 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T00:23:46,033 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:23:46,034 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:23:46,035 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T00:23:46,040 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T00:23:46,040 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:23:46,042 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:23:46,042 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T00:23:46,046 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T00:23:46,046 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:23:46,047 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:23:46,047 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T00:23:46,049 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/hbase/meta/1588230740 2024-12-05T00:23:46,050 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/hbase/meta/1588230740 2024-12-05T00:23:46,054 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T00:23:46,054 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T00:23:46,055 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-05T00:23:46,058 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T00:23:46,062 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T00:23:46,063 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=806153, jitterRate=0.02507747709751129}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-05T00:23:46,066 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733358226020Initializing all the Stores at 1733358226022 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358226023 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358226024 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358226024Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358226024Cleaning up temporary data from old regions at 1733358226054 (+30 ms)Region opened successfully at 1733358226066 (+12 ms) 2024-12-05T00:23:46,066 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T00:23:46,066 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T00:23:46,066 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T00:23:46,066 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T00:23:46,066 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T00:23:46,069 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T00:23:46,069 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733358226066Disabling compacts and flushes for region at 1733358226066Disabling writes for close at 1733358226066Writing region close event to WAL at 1733358226068 (+2 ms)Closed at 1733358226069 (+1 ms) 2024-12-05T00:23:46,069 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56331, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T00:23:46,072 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:23:46,072 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-05T00:23:46,077 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35931 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2113c16e5528,36827,1733358225045 2024-12-05T00:23:46,079 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-05T00:23:46,081 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35931 {}] master.ServerManager(517): Registering regionserver=2113c16e5528,36827,1733358225045 2024-12-05T00:23:46,088 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T00:23:46,090 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, 
ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-05T00:23:46,099 DEBUG [RS:0;2113c16e5528:36827 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37 2024-12-05T00:23:46,099 DEBUG [RS:0;2113c16e5528:36827 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45273 2024-12-05T00:23:46,099 DEBUG [RS:0;2113c16e5528:36827 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T00:23:46,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T00:23:46,105 DEBUG [RS:0;2113c16e5528:36827 {}] zookeeper.ZKUtil(111): regionserver:36827-0x10180016afe0001, quorum=127.0.0.1:51053, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2113c16e5528,36827,1733358225045 2024-12-05T00:23:46,105 WARN [RS:0;2113c16e5528:36827 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T00:23:46,105 INFO [RS:0;2113c16e5528:36827 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T00:23:46,106 DEBUG [RS:0;2113c16e5528:36827 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045 2024-12-05T00:23:46,108 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2113c16e5528,36827,1733358225045] 2024-12-05T00:23:46,134 INFO [RS:0;2113c16e5528:36827 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T00:23:46,147 INFO [RS:0;2113c16e5528:36827 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T00:23:46,152 INFO [RS:0;2113c16e5528:36827 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T00:23:46,153 INFO [RS:0;2113c16e5528:36827 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:23:46,154 INFO [RS:0;2113c16e5528:36827 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T00:23:46,160 INFO [RS:0;2113c16e5528:36827 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T00:23:46,161 INFO [RS:0;2113c16e5528:36827 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
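Editor's note: the MemStoreFlusher line above (globalMemStoreLimit=880 M, low mark 836 M) and the PressureAwareCompactionThroughputController bounds (100 MB/s upper, 50 MB/s lower) are derived from a few region-server settings. A hedged sketch of the corresponding keys; the names are the commonly documented ones and the values just restate the logged defaults, so treat this as illustration rather than tuning advice:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RegionServerPressureSketch {
      public static Configuration configure() {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the region-server heap available to all memstores
        // (the 880 M figure above is this fraction of the test JVM heap).
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Low-water mark as a fraction of that limit (836 M / 880 M = 0.95, the default).
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        // Pressure-aware compaction throughput bounds, in bytes per second.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        return conf;
      }
    }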
2024-12-05T00:23:46,161 DEBUG [RS:0;2113c16e5528:36827 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:23:46,162 DEBUG [RS:0;2113c16e5528:36827 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:23:46,162 DEBUG [RS:0;2113c16e5528:36827 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:23:46,162 DEBUG [RS:0;2113c16e5528:36827 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:23:46,162 DEBUG [RS:0;2113c16e5528:36827 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:23:46,162 DEBUG [RS:0;2113c16e5528:36827 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2113c16e5528:0, corePoolSize=2, maxPoolSize=2 2024-12-05T00:23:46,162 DEBUG [RS:0;2113c16e5528:36827 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:23:46,163 DEBUG [RS:0;2113c16e5528:36827 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:23:46,163 DEBUG [RS:0;2113c16e5528:36827 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:23:46,163 DEBUG [RS:0;2113c16e5528:36827 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:23:46,163 DEBUG [RS:0;2113c16e5528:36827 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:23:46,163 DEBUG [RS:0;2113c16e5528:36827 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:23:46,163 DEBUG [RS:0;2113c16e5528:36827 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:23:46,163 DEBUG [RS:0;2113c16e5528:36827 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:23:46,164 INFO [RS:0;2113c16e5528:36827 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T00:23:46,165 INFO [RS:0;2113c16e5528:36827 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T00:23:46,165 INFO [RS:0;2113c16e5528:36827 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:23:46,165 INFO [RS:0;2113c16e5528:36827 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
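Editor's note: the executor.ExecutorService lines above show the region server starting one bounded, named thread pool per event type, with corePoolSize equal to maxPoolSize. The pattern itself is plain java.util.concurrent; the following is a generic illustration of that pattern, not HBase's internal ExecutorService class:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadFactory;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    public final class NamedFixedPoolSketch {
      // Builds a fixed-size pool whose worker threads carry the executor name, e.g. "RS_OPEN_REGION-1".
      public static ThreadPoolExecutor create(String name, int poolSize) {
        AtomicInteger seq = new AtomicInteger();
        ThreadFactory factory = r -> {
          Thread t = new Thread(r, name + "-" + seq.incrementAndGet());
          t.setDaemon(true);
          return t;
        };
        // core == max, so the pool never grows past poolSize; idle workers are allowed to time out.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            poolSize, poolSize, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(), factory);
        pool.allowCoreThreadTimeOut(true);
        return pool;
      }
    }

For example, create("RS_OPEN_REGION-regionserver", 1) would mirror the corePoolSize=1, maxPoolSize=1 entries logged above, while create("RS_SNAPSHOT_OPERATIONS-regionserver", 3) would mirror the size-3 pools.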
2024-12-05T00:23:46,165 INFO [RS:0;2113c16e5528:36827 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T00:23:46,165 INFO [RS:0;2113c16e5528:36827 {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,36827,1733358225045-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T00:23:46,183 INFO [RS:0;2113c16e5528:36827 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T00:23:46,185 INFO [RS:0;2113c16e5528:36827 {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,36827,1733358225045-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:23:46,186 INFO [RS:0;2113c16e5528:36827 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:23:46,186 INFO [RS:0;2113c16e5528:36827 {}] regionserver.Replication(171): 2113c16e5528,36827,1733358225045 started 2024-12-05T00:23:46,203 INFO [RS:0;2113c16e5528:36827 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:23:46,204 INFO [RS:0;2113c16e5528:36827 {}] regionserver.HRegionServer(1482): Serving as 2113c16e5528,36827,1733358225045, RpcServer on 2113c16e5528/172.17.0.2:36827, sessionid=0x10180016afe0001 2024-12-05T00:23:46,204 DEBUG [RS:0;2113c16e5528:36827 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T00:23:46,205 DEBUG [RS:0;2113c16e5528:36827 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2113c16e5528,36827,1733358225045 2024-12-05T00:23:46,205 DEBUG [RS:0;2113c16e5528:36827 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2113c16e5528,36827,1733358225045' 2024-12-05T00:23:46,205 DEBUG [RS:0;2113c16e5528:36827 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T00:23:46,207 DEBUG [RS:0;2113c16e5528:36827 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T00:23:46,210 DEBUG [RS:0;2113c16e5528:36827 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T00:23:46,210 DEBUG [RS:0;2113c16e5528:36827 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T00:23:46,210 DEBUG [RS:0;2113c16e5528:36827 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2113c16e5528,36827,1733358225045 2024-12-05T00:23:46,210 DEBUG [RS:0;2113c16e5528:36827 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2113c16e5528,36827,1733358225045' 2024-12-05T00:23:46,210 DEBUG [RS:0;2113c16e5528:36827 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T00:23:46,211 DEBUG [RS:0;2113c16e5528:36827 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T00:23:46,212 DEBUG [RS:0;2113c16e5528:36827 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T00:23:46,212 INFO [RS:0;2113c16e5528:36827 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T00:23:46,212 INFO [RS:0;2113c16e5528:36827 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-12-05T00:23:46,241 WARN [2113c16e5528:35931 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-05T00:23:46,320 INFO [RS:0;2113c16e5528:36827 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2113c16e5528%2C36827%2C1733358225045, suffix=, logDir=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045, archiveDir=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/oldWALs, maxLogs=32 2024-12-05T00:23:46,322 INFO [RS:0;2113c16e5528:36827 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C36827%2C1733358225045.1733358226322 2024-12-05T00:23:46,331 INFO [RS:0;2113c16e5528:36827 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358226322 2024-12-05T00:23:46,332 DEBUG [RS:0;2113c16e5528:36827 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38573:38573),(127.0.0.1/127.0.0.1:36099:36099)] 2024-12-05T00:23:46,494 DEBUG [2113c16e5528:35931 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-05T00:23:46,506 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=2113c16e5528,36827,1733358225045 2024-12-05T00:23:46,513 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2113c16e5528,36827,1733358225045, state=OPENING 2024-12-05T00:23:46,519 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-05T00:23:46,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:23:46,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36827-0x10180016afe0001, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:23:46,522 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:23:46,522 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:23:46,523 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T00:23:46,525 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=2113c16e5528,36827,1733358225045}] 2024-12-05T00:23:46,700 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T00:23:46,704 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59953, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T00:23:46,715 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-05T00:23:46,716 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T00:23:46,720 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2113c16e5528%2C36827%2C1733358225045.meta, suffix=.meta, logDir=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045, archiveDir=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/oldWALs, maxLogs=32 2024-12-05T00:23:46,723 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C36827%2C1733358225045.meta.1733358226722.meta 2024-12-05T00:23:46,731 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.meta.1733358226722.meta 2024-12-05T00:23:46,735 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38573:38573),(127.0.0.1/127.0.0.1:36099:36099)] 2024-12-05T00:23:46,736 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-05T00:23:46,738 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-05T00:23:46,741 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-05T00:23:46,747 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-05T00:23:46,752 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-05T00:23:46,752 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:23:46,753 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-05T00:23:46,753 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-05T00:23:46,756 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T00:23:46,758 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T00:23:46,758 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:23:46,759 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:23:46,759 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T00:23:46,760 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T00:23:46,760 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:23:46,761 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:23:46,761 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T00:23:46,762 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T00:23:46,763 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:23:46,763 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:23:46,763 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T00:23:46,765 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T00:23:46,765 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:23:46,765 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-05T00:23:46,766 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T00:23:46,767 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/hbase/meta/1588230740 2024-12-05T00:23:46,769 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/hbase/meta/1588230740 2024-12-05T00:23:46,771 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T00:23:46,771 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T00:23:46,772 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-05T00:23:46,774 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T00:23:46,776 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=770995, jitterRate=-0.01963008940219879}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-05T00:23:46,776 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-05T00:23:46,777 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733358226753Writing region info on filesystem at 1733358226754 (+1 ms)Initializing all the Stores at 1733358226756 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358226756Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358226756Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358226756Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358226756Cleaning up temporary data from old regions at 1733358226771 (+15 ms)Running coprocessor post-open hooks at 1733358226776 (+5 ms)Region opened successfully at 1733358226777 (+1 ms) 2024-12-05T00:23:46,784 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733358226691 2024-12-05T00:23:46,795 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-05T00:23:46,795 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-05T00:23:46,797 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,36827,1733358225045 2024-12-05T00:23:46,799 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2113c16e5528,36827,1733358225045, state=OPEN 2024-12-05T00:23:46,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T00:23:46,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36827-0x10180016afe0001, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T00:23:46,805 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:23:46,805 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:23:46,806 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=2113c16e5528,36827,1733358225045 2024-12-05T00:23:46,811 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-05T00:23:46,811 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=2113c16e5528,36827,1733358225045 in 281 msec 2024-12-05T00:23:46,818 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-05T00:23:46,818 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 734 msec 2024-12-05T00:23:46,819 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:23:46,820 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-05T00:23:46,840 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T00:23:46,842 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,36827,1733358225045, seqNum=-1] 2024-12-05T00:23:46,863 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T00:23:46,865 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37261, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T00:23:46,890 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 996 msec 2024-12-05T00:23:46,890 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733358226890, completionTime=-1 2024-12-05T00:23:46,892 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-05T00:23:46,893 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-05T00:23:46,919 INFO [master/2113c16e5528:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-05T00:23:46,919 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733358286919 2024-12-05T00:23:46,920 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733358346920 2024-12-05T00:23:46,920 INFO [master/2113c16e5528:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 27 msec 2024-12-05T00:23:46,922 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,35931,1733358224328-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:23:46,923 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,35931,1733358224328-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:23:46,923 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,35931,1733358224328-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:23:46,924 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-2113c16e5528:35931, period=300000, unit=MILLISECONDS is enabled. 
2024-12-05T00:23:46,925 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-05T00:23:46,925 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-05T00:23:46,931 DEBUG [master/2113c16e5528:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-05T00:23:46,953 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.766sec 2024-12-05T00:23:46,954 INFO [master/2113c16e5528:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-05T00:23:46,955 INFO [master/2113c16e5528:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-05T00:23:46,956 INFO [master/2113c16e5528:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-05T00:23:46,957 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-05T00:23:46,957 INFO [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-05T00:23:46,958 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,35931,1733358224328-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T00:23:46,958 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,35931,1733358224328-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-05T00:23:46,966 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-05T00:23:46,967 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-05T00:23:46,968 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,35931,1733358224328-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-05T00:23:47,047 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c60f60f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:23:47,050 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-05T00:23:47,050 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-05T00:23:47,053 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,35931,-1 for getting cluster id 2024-12-05T00:23:47,056 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T00:23:47,064 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '3fb41781-a778-4b60-9ba3-ff65aa541172' 2024-12-05T00:23:47,067 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T00:23:47,067 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "3fb41781-a778-4b60-9ba3-ff65aa541172" 2024-12-05T00:23:47,067 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2fdec8a4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:23:47,068 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,35931,-1] 2024-12-05T00:23:47,070 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T00:23:47,072 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:23:47,074 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43168, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T00:23:47,076 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ae5d858, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:23:47,077 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T00:23:47,084 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,36827,1733358225045, seqNum=-1] 2024-12-05T00:23:47,085 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T00:23:47,087 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52058, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T00:23:47,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=2113c16e5528,35931,1733358224328 2024-12-05T00:23:47,108 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:23:47,116 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-05T00:23:47,120 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-05T00:23:47,125 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 2113c16e5528,35931,1733358224328 2024-12-05T00:23:47,128 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7f2f240b 2024-12-05T00:23:47,129 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-05T00:23:47,131 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43172, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-05T00:23:47,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35931 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-05T00:23:47,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35931 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
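
The two TableDescriptorChecker warnings above reflect the deliberately small limits configured for this run: MAX_FILESIZE of 786432 bytes and MEMSTORE_FLUSHSIZE of 8192 bytes, which the checker notes will cause aggressive splitting and very frequent flushing. Below is a minimal sketch of how such a descriptor can be assembled with the stock HBase client API; the class and method names are placeholders, the numeric values are taken from the warnings, and the test's actual helper code is not visible in this log.

    // Illustrative sketch only, not the test's own code: a table descriptor carrying the
    // small MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) values warned about above.
    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public final class SmallFlushTableSketch {
      static void createTable(Admin admin) throws IOException {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
            .setMaxFileSize(786432L)        // small max file size -> MAX_FILESIZE warning
            .setMemStoreFlushSize(8192L)    // small flush size -> MEMSTORE_FLUSHSIZE warning
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .build();
        admin.createTable(td);              // surfaces as the CreateTableProcedure (pid=4) below
      }
    }

The subsequent log entries show that request arriving at the master and being stored as pid=4, then driven through the CREATE_TABLE_* states.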
2024-12-05T00:23:47,137 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35931 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T00:23:47,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35931 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-12-05T00:23:47,147 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T00:23:47,149 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35931 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-12-05T00:23:47,150 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:23:47,152 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T00:23:47,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35931 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T00:23:47,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741835_1011 (size=389) 2024-12-05T00:23:47,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741835_1011 (size=389) 2024-12-05T00:23:47,187 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 32f4c586ca315abab29a77f6fcad6119, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37 2024-12-05T00:23:47,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741836_1012 (size=72) 2024-12-05T00:23:47,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741836_1012 (size=72) 2024-12-05T00:23:47,199 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:23:47,200 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 32f4c586ca315abab29a77f6fcad6119, disabling compactions & flushes 2024-12-05T00:23:47,200 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119. 2024-12-05T00:23:47,200 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119. 2024-12-05T00:23:47,200 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119. after waiting 0 ms 2024-12-05T00:23:47,200 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119. 2024-12-05T00:23:47,200 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119. 2024-12-05T00:23:47,200 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 32f4c586ca315abab29a77f6fcad6119: Waiting for close lock at 1733358227200Disabling compacts and flushes for region at 1733358227200Disabling writes for close at 1733358227200Writing region close event to WAL at 1733358227200Closed at 1733358227200 2024-12-05T00:23:47,202 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T00:23:47,207 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1733358227202"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733358227202"}]},"ts":"1733358227202"} 2024-12-05T00:23:47,213 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-05T00:23:47,215 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T00:23:47,235 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733358227215"}]},"ts":"1733358227215"} 2024-12-05T00:23:47,241 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-12-05T00:23:47,244 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=32f4c586ca315abab29a77f6fcad6119, ASSIGN}] 2024-12-05T00:23:47,247 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=32f4c586ca315abab29a77f6fcad6119, ASSIGN 2024-12-05T00:23:47,249 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=32f4c586ca315abab29a77f6fcad6119, ASSIGN; state=OFFLINE, location=2113c16e5528,36827,1733358225045; forceNewPlan=false, retain=false 2024-12-05T00:23:47,400 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=32f4c586ca315abab29a77f6fcad6119, regionState=OPENING, regionLocation=2113c16e5528,36827,1733358225045 2024-12-05T00:23:47,404 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=32f4c586ca315abab29a77f6fcad6119, ASSIGN because future has completed 2024-12-05T00:23:47,405 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 32f4c586ca315abab29a77f6fcad6119, server=2113c16e5528,36827,1733358225045}] 2024-12-05T00:23:47,566 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119. 
2024-12-05T00:23:47,566 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 32f4c586ca315abab29a77f6fcad6119, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119.', STARTKEY => '', ENDKEY => ''} 2024-12-05T00:23:47,567 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 32f4c586ca315abab29a77f6fcad6119 2024-12-05T00:23:47,567 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:23:47,567 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 32f4c586ca315abab29a77f6fcad6119 2024-12-05T00:23:47,567 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 32f4c586ca315abab29a77f6fcad6119 2024-12-05T00:23:47,569 INFO [StoreOpener-32f4c586ca315abab29a77f6fcad6119-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 32f4c586ca315abab29a77f6fcad6119 2024-12-05T00:23:47,572 INFO [StoreOpener-32f4c586ca315abab29a77f6fcad6119-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 32f4c586ca315abab29a77f6fcad6119 columnFamilyName info 2024-12-05T00:23:47,572 DEBUG [StoreOpener-32f4c586ca315abab29a77f6fcad6119-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:23:47,573 INFO [StoreOpener-32f4c586ca315abab29a77f6fcad6119-1 {}] regionserver.HStore(327): Store=32f4c586ca315abab29a77f6fcad6119/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:23:47,573 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 32f4c586ca315abab29a77f6fcad6119 2024-12-05T00:23:47,574 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119 2024-12-05T00:23:47,575 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119 2024-12-05T00:23:47,576 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 32f4c586ca315abab29a77f6fcad6119 2024-12-05T00:23:47,576 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 32f4c586ca315abab29a77f6fcad6119 2024-12-05T00:23:47,579 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 32f4c586ca315abab29a77f6fcad6119 2024-12-05T00:23:47,582 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T00:23:47,583 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 32f4c586ca315abab29a77f6fcad6119; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=696177, jitterRate=-0.11476637423038483}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T00:23:47,583 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 32f4c586ca315abab29a77f6fcad6119 2024-12-05T00:23:47,584 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 32f4c586ca315abab29a77f6fcad6119: Running coprocessor pre-open hook at 1733358227567Writing region info on filesystem at 1733358227567Initializing all the Stores at 1733358227569 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358227569Cleaning up temporary data from old regions at 1733358227576 (+7 ms)Running coprocessor post-open hooks at 1733358227583 (+7 ms)Region opened successfully at 1733358227584 (+1 ms) 2024-12-05T00:23:47,586 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119., pid=6, masterSystemTime=1733358227559 2024-12-05T00:23:47,589 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119. 2024-12-05T00:23:47,589 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119. 2024-12-05T00:23:47,591 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=32f4c586ca315abab29a77f6fcad6119, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,36827,1733358225045 2024-12-05T00:23:47,594 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 32f4c586ca315abab29a77f6fcad6119, server=2113c16e5528,36827,1733358225045 because future has completed 2024-12-05T00:23:47,600 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-05T00:23:47,600 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 32f4c586ca315abab29a77f6fcad6119, server=2113c16e5528,36827,1733358225045 in 191 msec 2024-12-05T00:23:47,604 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-05T00:23:47,604 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=32f4c586ca315abab29a77f6fcad6119, ASSIGN in 356 msec 2024-12-05T00:23:47,606 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T00:23:47,606 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733358227606"}]},"ts":"1733358227606"} 2024-12-05T00:23:47,609 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-12-05T00:23:47,611 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T00:23:47,614 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 471 msec 2024-12-05T00:23:52,257 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-05T00:23:52,309 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-05T00:23:52,310 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-12-05T00:23:54,744 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-05T00:23:54,744 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-05T00:23:54,746 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-05T00:23:54,746 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-05T00:23:54,747 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T00:23:54,747 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-05T00:23:54,747 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-05T00:23:54,747 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-05T00:23:57,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35931 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T00:23:57,183 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-12-05T00:23:57,186 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-12-05T00:23:57,194 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-12-05T00:23:57,194 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119. 
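
At this point the test utility has scanned hbase:meta and located the single region of the new table (firstRegionName ending in 32f4c586ca315abab29a77f6fcad6119 on 2113c16e5528,36827,1733358225045). A minimal sketch of the same lookup through the public client API follows, assuming an already-open Connection; the class name and printout are illustrative only and do not come from the test.

    // Illustrative sketch only: resolving the table's regions via RegionLocator, which
    // performs the same hbase:meta scan the test utility logs above.
    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public final class RegionLookupSketch {
      static void printRegions(Connection conn) throws IOException {
        TableName table = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        try (RegionLocator locator = conn.getRegionLocator(table)) {
          List<HRegionLocation> locations = locator.getAllRegionLocations();
          for (HRegionLocation location : locations) {
            // In this run there is one region (...32f4c586ca315abab29a77f6fcad6119.)
            // hosted on 2113c16e5528,36827,1733358225045.
            System.out.println(location.getRegion().getRegionNameAsString()
                + " on " + location.getServerName());
          }
        }
      }
    }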
2024-12-05T00:23:57,195 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C36827%2C1733358225045.1733358237195 2024-12-05T00:23:57,206 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:23:57,206 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:23:57,207 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:23:57,207 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:23:57,207 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:23:57,208 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358226322 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358237195 2024-12-05T00:23:57,213 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38573:38573),(127.0.0.1/127.0.0.1:36099:36099)] 2024-12-05T00:23:57,213 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358226322 is not closed yet, will try archiving it next time 2024-12-05T00:23:57,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741833_1009 (size=451) 2024-12-05T00:23:57,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741833_1009 (size=451) 2024-12-05T00:23:57,219 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358226322 to hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/oldWALs/2113c16e5528%2C36827%2C1733358225045.1733358226322 2024-12-05T00:23:57,223 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119., hostname=2113c16e5528,36827,1733358225045, seqNum=2] 2024-12-05T00:24:09,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36827 {}] regionserver.HRegion(8855): Flush requested on 32f4c586ca315abab29a77f6fcad6119 2024-12-05T00:24:09,263 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 32f4c586ca315abab29a77f6fcad6119 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-05T00:24:09,326 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/.tmp/info/15632f416b0f4b64a4b8ce2eab3437fa is 1080, key is row0001/info:/1733358237227/Put/seqid=0 2024-12-05T00:24:09,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741838_1014 (size=12509) 2024-12-05T00:24:09,338 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741838_1014 (size=12509) 2024-12-05T00:24:09,339 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/.tmp/info/15632f416b0f4b64a4b8ce2eab3437fa 2024-12-05T00:24:09,386 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/.tmp/info/15632f416b0f4b64a4b8ce2eab3437fa as hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/info/15632f416b0f4b64a4b8ce2eab3437fa 2024-12-05T00:24:09,397 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/info/15632f416b0f4b64a4b8ce2eab3437fa, entries=7, sequenceid=11, filesize=12.2 K 2024-12-05T00:24:09,405 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 32f4c586ca315abab29a77f6fcad6119 in 140ms, sequenceid=11, compaction requested=false 2024-12-05T00:24:09,407 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 32f4c586ca315abab29a77f6fcad6119: 2024-12-05T00:24:13,406 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
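
The flush cycle above (flush requested on 32f4c586ca315abab29a77f6fcad6119, a ~12.2 K HFile written under .tmp and then committed into info/) is the normal memstore-to-store-file path. A hedged sketch of driving that path by hand, assuming a reachable cluster and reusing only the table and family names from the log (the row keys and value sizes below are placeholders, not the test's actual payload):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FlushSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        byte[] family = Bytes.toBytes("info");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(tn);
             Admin admin = conn.getAdmin()) {
          // Row keys shaped like the ones visible in the log (row0001, row0008, ...).
          for (int i = 1; i <= 7; i++) {
            Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
            put.addColumn(family, Bytes.toBytes("q"), new byte[1024]);
            table.put(put);
          }
          // Ask the region server to flush the memstore to an HFile, the operation
          // the MemStoreFlusher "Flush requested" / "Finished flush" entries record.
          admin.flush(tn);
        }
      }
    }
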
2024-12-05T00:24:17,273 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C36827%2C1733358225045.1733358257273 2024-12-05T00:24:17,482 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38077,DS-b2e319e7-7be9-4588-9599-ac2f1d57f49d,DISK], DatanodeInfoWithStorage[127.0.0.1:40023,DS-a4e66060-ab46-4fa0-935e-7ea2399c198a,DISK]] 2024-12-05T00:24:17,482 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:17,482 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:17,483 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:17,483 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:17,483 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:17,483 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358237195 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358257273 2024-12-05T00:24:17,484 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38573:38573),(127.0.0.1/127.0.0.1:36099:36099)] 2024-12-05T00:24:17,484 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358237195 is not closed yet, will try archiving it next time 2024-12-05T00:24:17,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741837_1013 (size=12399) 2024-12-05T00:24:17,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741837_1013 (size=12399) 2024-12-05T00:24:17,687 INFO [FSHLog-0-hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37-prefix:2113c16e5528,36827,1733358225045 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38077,DS-b2e319e7-7be9-4588-9599-ac2f1d57f49d,DISK], DatanodeInfoWithStorage[127.0.0.1:40023,DS-a4e66060-ab46-4fa0-935e-7ea2399c198a,DISK]] 2024-12-05T00:24:19,891 INFO [FSHLog-0-hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37-prefix:2113c16e5528,36827,1733358225045 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38077,DS-b2e319e7-7be9-4588-9599-ac2f1d57f49d,DISK], DatanodeInfoWithStorage[127.0.0.1:40023,DS-a4e66060-ab46-4fa0-935e-7ea2399c198a,DISK]] 2024-12-05T00:24:22,095 INFO [FSHLog-0-hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37-prefix:2113c16e5528,36827,1733358225045 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38077,DS-b2e319e7-7be9-4588-9599-ac2f1d57f49d,DISK], DatanodeInfoWithStorage[127.0.0.1:40023,DS-a4e66060-ab46-4fa0-935e-7ea2399c198a,DISK]] 2024-12-05T00:24:24,299 INFO [FSHLog-0-hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37-prefix:2113c16e5528,36827,1733358225045 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38077,DS-b2e319e7-7be9-4588-9599-ac2f1d57f49d,DISK], DatanodeInfoWithStorage[127.0.0.1:40023,DS-a4e66060-ab46-4fa0-935e-7ea2399c198a,DISK]] 2024-12-05T00:24:24,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36827 {}] regionserver.HRegion(8855): Flush requested on 32f4c586ca315abab29a77f6fcad6119 2024-12-05T00:24:24,300 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 32f4c586ca315abab29a77f6fcad6119 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-05T00:24:24,502 INFO [FSHLog-0-hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37-prefix:2113c16e5528,36827,1733358225045 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38077,DS-b2e319e7-7be9-4588-9599-ac2f1d57f49d,DISK], DatanodeInfoWithStorage[127.0.0.1:40023,DS-a4e66060-ab46-4fa0-935e-7ea2399c198a,DISK]] 2024-12-05T00:24:24,508 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/.tmp/info/1e77a444346f4da2b5080b7ab658c738 is 1080, key is row0008/info:/1733358251263/Put/seqid=0 2024-12-05T00:24:24,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741840_1016 (size=12509) 2024-12-05T00:24:24,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741840_1016 (size=12509) 2024-12-05T00:24:24,517 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/.tmp/info/1e77a444346f4da2b5080b7ab658c738 2024-12-05T00:24:24,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/.tmp/info/1e77a444346f4da2b5080b7ab658c738 as hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/info/1e77a444346f4da2b5080b7ab658c738 2024-12-05T00:24:24,539 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/info/1e77a444346f4da2b5080b7ab658c738, entries=7, sequenceid=21, filesize=12.2 K 2024-12-05T00:24:24,741 INFO [FSHLog-0-hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37-prefix:2113c16e5528,36827,1733358225045 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38077,DS-b2e319e7-7be9-4588-9599-ac2f1d57f49d,DISK], DatanodeInfoWithStorage[127.0.0.1:40023,DS-a4e66060-ab46-4fa0-935e-7ea2399c198a,DISK]] 2024-12-05T00:24:24,741 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 32f4c586ca315abab29a77f6fcad6119 in 
441ms, sequenceid=21, compaction requested=false 2024-12-05T00:24:24,741 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 32f4c586ca315abab29a77f6fcad6119: 2024-12-05T00:24:24,742 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-12-05T00:24:24,742 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:24:24,743 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/info/15632f416b0f4b64a4b8ce2eab3437fa because midkey is the same as first or last row 2024-12-05T00:24:26,504 INFO [FSHLog-0-hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37-prefix:2113c16e5528,36827,1733358225045 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38077,DS-b2e319e7-7be9-4588-9599-ac2f1d57f49d,DISK], DatanodeInfoWithStorage[127.0.0.1:40023,DS-a4e66060-ab46-4fa0-935e-7ea2399c198a,DISK]] 2024-12-05T00:24:27,316 INFO [master/2113c16e5528:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-05T00:24:27,316 INFO [master/2113c16e5528:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-05T00:24:28,707 INFO [FSHLog-0-hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37-prefix:2113c16e5528,36827,1733358225045 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38077,DS-b2e319e7-7be9-4588-9599-ac2f1d57f49d,DISK], DatanodeInfoWithStorage[127.0.0.1:40023,DS-a4e66060-ab46-4fa0-935e-7ea2399c198a,DISK]] 2024-12-05T00:24:28,710 WARN [FSHLog-0-hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37-prefix:2113c16e5528,36827,1733358225045 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38077,DS-b2e319e7-7be9-4588-9599-ac2f1d57f49d,DISK], DatanodeInfoWithStorage[127.0.0.1:40023,DS-a4e66060-ab46-4fa0-935e-7ea2399c198a,DISK]] 2024-12-05T00:24:28,711 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 2113c16e5528%2C36827%2C1733358225045:(num 1733358257273) roll requested 2024-12-05T00:24:28,711 INFO [regionserver/2113c16e5528:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C36827%2C1733358225045.1733358268711 2024-12-05T00:24:28,919 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38077,DS-b2e319e7-7be9-4588-9599-ac2f1d57f49d,DISK], DatanodeInfoWithStorage[127.0.0.1:40023,DS-a4e66060-ab46-4fa0-935e-7ea2399c198a,DISK]] 2024-12-05T00:24:28,919 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:28,919 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:28,919 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:28,920 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:28,920 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
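
The WARN above shows a WAL roll being requested because the slow-sync count (8) exceeded the threshold configured for this run (5); the log roller then opens a new WAL file. The same roll can be requested explicitly through the Admin API, as in this illustrative sketch (the server name is copied from the WAL paths above; everything else is an assumption):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWalSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // host,port,startcode taken from the WAL directory names in the log above.
          ServerName rs = ServerName.valueOf("2113c16e5528,36827,1733358225045");
          // Closes the current WAL on that region server and opens a new one.
          admin.rollWALWriter(rs);
        }
      }
    }

In the test run itself no manual call is needed: the roller thread reacts on its own once the slow-sync threshold trips, which is what the "roll requested" DEBUG entry above shows.
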
2024-12-05T00:24:28,920 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358257273 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358268711 2024-12-05T00:24:28,921 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36099:36099),(127.0.0.1/127.0.0.1:38573:38573)] 2024-12-05T00:24:28,921 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358257273 is not closed yet, will try archiving it next time 2024-12-05T00:24:28,921 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358237195 to hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/oldWALs/2113c16e5528%2C36827%2C1733358225045.1733358237195 2024-12-05T00:24:28,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741839_1015 (size=7739) 2024-12-05T00:24:28,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741839_1015 (size=7739) 2024-12-05T00:24:30,911 INFO [FSHLog-0-hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37-prefix:2113c16e5528,36827,1733358225045 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40023,DS-a4e66060-ab46-4fa0-935e-7ea2399c198a,DISK], DatanodeInfoWithStorage[127.0.0.1:38077,DS-b2e319e7-7be9-4588-9599-ac2f1d57f49d,DISK]] 2024-12-05T00:24:32,567 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 32f4c586ca315abab29a77f6fcad6119, had cached 0 bytes from a total of 25018 2024-12-05T00:24:33,115 INFO [FSHLog-0-hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37-prefix:2113c16e5528,36827,1733358225045 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40023,DS-a4e66060-ab46-4fa0-935e-7ea2399c198a,DISK], DatanodeInfoWithStorage[127.0.0.1:38077,DS-b2e319e7-7be9-4588-9599-ac2f1d57f49d,DISK]] 2024-12-05T00:24:35,319 INFO [FSHLog-0-hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37-prefix:2113c16e5528,36827,1733358225045 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40023,DS-a4e66060-ab46-4fa0-935e-7ea2399c198a,DISK], DatanodeInfoWithStorage[127.0.0.1:38077,DS-b2e319e7-7be9-4588-9599-ac2f1d57f49d,DISK]] 2024-12-05T00:24:37,524 INFO [FSHLog-0-hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37-prefix:2113c16e5528,36827,1733358225045 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40023,DS-a4e66060-ab46-4fa0-935e-7ea2399c198a,DISK], 
DatanodeInfoWithStorage[127.0.0.1:38077,DS-b2e319e7-7be9-4588-9599-ac2f1d57f49d,DISK]] 2024-12-05T00:24:39,527 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T00:24:39,527 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C36827%2C1733358225045.1733358279527 2024-12-05T00:24:43,406 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-05T00:24:44,537 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40023,DS-a4e66060-ab46-4fa0-935e-7ea2399c198a,DISK], DatanodeInfoWithStorage[127.0.0.1:38077,DS-b2e319e7-7be9-4588-9599-ac2f1d57f49d,DISK]] 2024-12-05T00:24:44,539 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40023,DS-a4e66060-ab46-4fa0-935e-7ea2399c198a,DISK], DatanodeInfoWithStorage[127.0.0.1:38077,DS-b2e319e7-7be9-4588-9599-ac2f1d57f49d,DISK]] 2024-12-05T00:24:44,540 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 2113c16e5528%2C36827%2C1733358225045:(num 1733358279527) roll requested 2024-12-05T00:24:44,540 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:44,540 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:44,540 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:44,540 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:44,541 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:44,541 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358268711 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358279527 2024-12-05T00:24:44,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741841_1017 (size=4753) 2024-12-05T00:24:44,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741841_1017 (size=4753) 2024-12-05T00:24:44,544 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38573:38573),(127.0.0.1/127.0.0.1:36099:36099)] 2024-12-05T00:24:44,545 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358268711 is not closed yet, will try archiving it next time 2024-12-05T00:24:44,545 INFO [regionserver/2113c16e5528:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C36827%2C1733358225045.1733358284545 2024-12-05T00:24:49,548 INFO [FSHLog-0-hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37-prefix:2113c16e5528,36827,1733358225045 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:38077,DS-b2e319e7-7be9-4588-9599-ac2f1d57f49d,DISK], DatanodeInfoWithStorage[127.0.0.1:40023,DS-a4e66060-ab46-4fa0-935e-7ea2399c198a,DISK]] 2024-12-05T00:24:49,549 WARN [FSHLog-0-hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37-prefix:2113c16e5528,36827,1733358225045 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38077,DS-b2e319e7-7be9-4588-9599-ac2f1d57f49d,DISK], DatanodeInfoWithStorage[127.0.0.1:40023,DS-a4e66060-ab46-4fa0-935e-7ea2399c198a,DISK]] 2024-12-05T00:24:49,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36827 {}] regionserver.HRegion(8855): Flush requested on 32f4c586ca315abab29a77f6fcad6119 2024-12-05T00:24:49,549 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 32f4c586ca315abab29a77f6fcad6119 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-05T00:24:49,554 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38077,DS-b2e319e7-7be9-4588-9599-ac2f1d57f49d,DISK], DatanodeInfoWithStorage[127.0.0.1:40023,DS-a4e66060-ab46-4fa0-935e-7ea2399c198a,DISK]] 2024-12-05T00:24:49,554 WARN [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38077,DS-b2e319e7-7be9-4588-9599-ac2f1d57f49d,DISK], DatanodeInfoWithStorage[127.0.0.1:40023,DS-a4e66060-ab46-4fa0-935e-7ea2399c198a,DISK]] 2024-12-05T00:24:51,550 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T00:24:54,551 INFO [FSHLog-0-hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37-prefix:2113c16e5528,36827,1733358225045 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38077,DS-b2e319e7-7be9-4588-9599-ac2f1d57f49d,DISK], DatanodeInfoWithStorage[127.0.0.1:40023,DS-a4e66060-ab46-4fa0-935e-7ea2399c198a,DISK]] 2024-12-05T00:24:54,552 WARN [FSHLog-0-hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37-prefix:2113c16e5528,36827,1733358225045 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38077,DS-b2e319e7-7be9-4588-9599-ac2f1d57f49d,DISK], DatanodeInfoWithStorage[127.0.0.1:40023,DS-a4e66060-ab46-4fa0-935e-7ea2399c198a,DISK]] 2024-12-05T00:24:54,552 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:54,552 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:54,552 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:54,552 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:54,552 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:54,553 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358279527 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358284545 2024-12-05T00:24:54,554 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36099:36099),(127.0.0.1/127.0.0.1:38573:38573)] 2024-12-05T00:24:54,554 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358279527 is not closed yet, will try archiving it next time 2024-12-05T00:24:54,554 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 2113c16e5528%2C36827%2C1733358225045:(num 1733358284545) roll requested 2024-12-05T00:24:54,554 INFO [regionserver/2113c16e5528:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C36827%2C1733358225045.1733358294554 2024-12-05T00:24:54,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741842_1018 (size=1569) 2024-12-05T00:24:54,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741842_1018 (size=1569) 2024-12-05T00:24:54,558 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/.tmp/info/7701bec8a19a424eb278bcd9fb8ffaa7 is 1080, key is row0015/info:/1733358266302/Put/seqid=0 2024-12-05T00:24:54,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741844_1020 (size=12509) 2024-12-05T00:24:54,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741844_1020 (size=12509) 2024-12-05T00:24:54,565 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/.tmp/info/7701bec8a19a424eb278bcd9fb8ffaa7 2024-12-05T00:24:54,574 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/.tmp/info/7701bec8a19a424eb278bcd9fb8ffaa7 as hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/info/7701bec8a19a424eb278bcd9fb8ffaa7 2024-12-05T00:24:54,583 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/info/7701bec8a19a424eb278bcd9fb8ffaa7, entries=7, sequenceid=31, filesize=12.2 K 2024-12-05T00:24:59,562 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:40023,DS-a4e66060-ab46-4fa0-935e-7ea2399c198a,DISK], DatanodeInfoWithStorage[127.0.0.1:38077,DS-b2e319e7-7be9-4588-9599-ac2f1d57f49d,DISK]] 2024-12-05T00:24:59,563 WARN [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40023,DS-a4e66060-ab46-4fa0-935e-7ea2399c198a,DISK], DatanodeInfoWithStorage[127.0.0.1:38077,DS-b2e319e7-7be9-4588-9599-ac2f1d57f49d,DISK]] 2024-12-05T00:24:59,584 INFO [FSHLog-0-hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37-prefix:2113c16e5528,36827,1733358225045 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40023,DS-a4e66060-ab46-4fa0-935e-7ea2399c198a,DISK], DatanodeInfoWithStorage[127.0.0.1:38077,DS-b2e319e7-7be9-4588-9599-ac2f1d57f49d,DISK]] 2024-12-05T00:24:59,584 WARN [FSHLog-0-hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37-prefix:2113c16e5528,36827,1733358225045 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40023,DS-a4e66060-ab46-4fa0-935e-7ea2399c198a,DISK], DatanodeInfoWithStorage[127.0.0.1:38077,DS-b2e319e7-7be9-4588-9599-ac2f1d57f49d,DISK]] 2024-12-05T00:24:59,584 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 32f4c586ca315abab29a77f6fcad6119 in 10035ms, sequenceid=31, compaction requested=true 2024-12-05T00:24:59,584 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:59,584 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 32f4c586ca315abab29a77f6fcad6119: 2024-12-05T00:24:59,585 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:59,585 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:59,585 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-12-05T00:24:59,585 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:24:59,585 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:59,585 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/info/15632f416b0f4b64a4b8ce2eab3437fa because midkey is the same as first or last row 2024-12-05T00:24:59,585 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:59,585 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358284545 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358294554 2024-12-05T00:24:59,586 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:36099:36099),(127.0.0.1/127.0.0.1:38573:38573)] 2024-12-05T00:24:59,586 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358284545 is not closed yet, will try archiving it next time 2024-12-05T00:24:59,586 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 2113c16e5528%2C36827%2C1733358225045:(num 1733358294554) roll requested 2024-12-05T00:24:59,586 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358257273 to hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/oldWALs/2113c16e5528%2C36827%2C1733358225045.1733358257273 2024-12-05T00:24:59,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 32f4c586ca315abab29a77f6fcad6119:info, priority=-2147483648, current under compaction store size is 1 2024-12-05T00:24:59,586 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C36827%2C1733358225045.1733358299586 2024-12-05T00:24:59,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741843_1019 (size=438) 2024-12-05T00:24:59,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741843_1019 (size=438) 2024-12-05T00:24:59,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-05T00:24:59,590 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358268711 to hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/oldWALs/2113c16e5528%2C36827%2C1733358225045.1733358268711 2024-12-05T00:24:59,590 DEBUG [RS:0;2113c16e5528:36827-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-05T00:24:59,592 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358279527 to hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/oldWALs/2113c16e5528%2C36827%2C1733358225045.1733358279527 2024-12-05T00:24:59,594 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358284545 to hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/oldWALs/2113c16e5528%2C36827%2C1733358225045.1733358284545 2024-12-05T00:24:59,596 DEBUG [RS:0;2113c16e5528:36827-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations 
with 1 in ratio 2024-12-05T00:24:59,597 DEBUG [RS:0;2113c16e5528:36827-shortCompactions-0 {}] regionserver.HStore(1541): 32f4c586ca315abab29a77f6fcad6119/info is initiating minor compaction (all files) 2024-12-05T00:24:59,598 INFO [RS:0;2113c16e5528:36827-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 32f4c586ca315abab29a77f6fcad6119/info in TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119. 2024-12-05T00:24:59,598 INFO [RS:0;2113c16e5528:36827-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/info/15632f416b0f4b64a4b8ce2eab3437fa, hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/info/1e77a444346f4da2b5080b7ab658c738, hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/info/7701bec8a19a424eb278bcd9fb8ffaa7] into tmpdir=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/.tmp, totalSize=36.6 K 2024-12-05T00:24:59,600 DEBUG [RS:0;2113c16e5528:36827-shortCompactions-0 {}] compactions.Compactor(225): Compacting 15632f416b0f4b64a4b8ce2eab3437fa, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733358237227 2024-12-05T00:24:59,601 DEBUG [RS:0;2113c16e5528:36827-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1e77a444346f4da2b5080b7ab658c738, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1733358251263 2024-12-05T00:24:59,601 DEBUG [RS:0;2113c16e5528:36827-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7701bec8a19a424eb278bcd9fb8ffaa7, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1733358266302 2024-12-05T00:24:59,602 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:59,602 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:59,602 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:59,602 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:59,602 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:59,602 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358294554 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358299586 2024-12-05T00:24:59,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741845_1021 (size=93) 2024-12-05T00:24:59,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741845_1021 (size=93) 2024-12-05T00:24:59,606 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358294554 to hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/oldWALs/2113c16e5528%2C36827%2C1733358225045.1733358294554 2024-12-05T00:24:59,617 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38573:38573),(127.0.0.1/127.0.0.1:36099:36099)] 2024-12-05T00:24:59,617 INFO [regionserver/2113c16e5528:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C36827%2C1733358225045.1733358299617 2024-12-05T00:24:59,629 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:59,629 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:59,629 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:59,630 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:59,630 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:24:59,630 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358299586 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358299617 2024-12-05T00:24:59,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741846_1022 (size=1258) 2024-12-05T00:24:59,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741846_1022 (size=1258) 2024-12-05T00:24:59,639 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36099:36099),(127.0.0.1/127.0.0.1:38573:38573)] 2024-12-05T00:24:59,640 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/WALs/2113c16e5528,36827,1733358225045/2113c16e5528%2C36827%2C1733358225045.1733358299586 is not closed yet, will try archiving it next time 2024-12-05T00:24:59,644 INFO [RS:0;2113c16e5528:36827-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 32f4c586ca315abab29a77f6fcad6119#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-05T00:24:59,645 DEBUG [RS:0;2113c16e5528:36827-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/.tmp/info/b47bc3a304e64b0db5795730db0c4a60 is 1080, key is row0001/info:/1733358237227/Put/seqid=0 2024-12-05T00:24:59,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741848_1024 (size=27710) 2024-12-05T00:24:59,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741848_1024 (size=27710) 2024-12-05T00:24:59,663 DEBUG [RS:0;2113c16e5528:36827-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/.tmp/info/b47bc3a304e64b0db5795730db0c4a60 as hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/info/b47bc3a304e64b0db5795730db0c4a60 2024-12-05T00:24:59,679 INFO [RS:0;2113c16e5528:36827-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 32f4c586ca315abab29a77f6fcad6119/info of 32f4c586ca315abab29a77f6fcad6119 into b47bc3a304e64b0db5795730db0c4a60(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-05T00:24:59,679 DEBUG [RS:0;2113c16e5528:36827-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 32f4c586ca315abab29a77f6fcad6119: 2024-12-05T00:24:59,681 INFO [RS:0;2113c16e5528:36827-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119., storeName=32f4c586ca315abab29a77f6fcad6119/info, priority=13, startTime=1733358299586; duration=0sec 2024-12-05T00:24:59,681 DEBUG [RS:0;2113c16e5528:36827-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-05T00:24:59,681 DEBUG [RS:0;2113c16e5528:36827-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:24:59,681 DEBUG [RS:0;2113c16e5528:36827-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/info/b47bc3a304e64b0db5795730db0c4a60 because midkey is the same as first or last row 2024-12-05T00:24:59,682 DEBUG [RS:0;2113c16e5528:36827-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-05T00:24:59,682 DEBUG [RS:0;2113c16e5528:36827-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:24:59,682 DEBUG [RS:0;2113c16e5528:36827-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/info/b47bc3a304e64b0db5795730db0c4a60 because midkey is the same as first or last row 2024-12-05T00:24:59,682 DEBUG [RS:0;2113c16e5528:36827-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-05T00:24:59,682 DEBUG [RS:0;2113c16e5528:36827-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:24:59,682 DEBUG [RS:0;2113c16e5528:36827-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/info/b47bc3a304e64b0db5795730db0c4a60 because midkey is the same as first or last row 2024-12-05T00:24:59,682 DEBUG [RS:0;2113c16e5528:36827-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-05T00:24:59,682 DEBUG [RS:0;2113c16e5528:36827-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 32f4c586ca315abab29a77f6fcad6119:info 2024-12-05T00:25:11,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36827 {}] regionserver.HRegion(8855): Flush requested on 32f4c586ca315abab29a77f6fcad6119 2024-12-05T00:25:11,642 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 32f4c586ca315abab29a77f6fcad6119 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-05T00:25:11,649 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/.tmp/info/2fdf1171796d4af5ac4de3b1543c7a95 is 1080, key is row0022/info:/1733358299619/Put/seqid=0 2024-12-05T00:25:11,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741849_1025 (size=12509) 2024-12-05T00:25:11,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741849_1025 (size=12509) 2024-12-05T00:25:11,656 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/.tmp/info/2fdf1171796d4af5ac4de3b1543c7a95 2024-12-05T00:25:11,666 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/.tmp/info/2fdf1171796d4af5ac4de3b1543c7a95 as hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/info/2fdf1171796d4af5ac4de3b1543c7a95 2024-12-05T00:25:11,676 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/info/2fdf1171796d4af5ac4de3b1543c7a95, entries=7, sequenceid=42, filesize=12.2 K 2024-12-05T00:25:11,677 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 32f4c586ca315abab29a77f6fcad6119 in 36ms, sequenceid=42, compaction requested=false 2024-12-05T00:25:11,677 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 32f4c586ca315abab29a77f6fcad6119: 2024-12-05T00:25:11,678 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-12-05T00:25:11,678 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:25:11,678 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/info/b47bc3a304e64b0db5795730db0c4a60 because midkey is the same as first or last row 2024-12-05T00:25:13,406 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-05T00:25:17,567 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 32f4c586ca315abab29a77f6fcad6119, had cached 0 bytes from a total of 40219 2024-12-05T00:25:19,652 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-05T00:25:19,653 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-05T00:25:19,653 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:25:19,658 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:25:19,659 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:25:19,659 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
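
The call stack above is the JUnit @After hook of AbstractTestLogRolling tearing the mini cluster down via HBaseTestingUtil.shutdownMiniCluster. A tearDown along these lines reproduces that shutdown sequence (a sketch only; the field name TEST_UTIL and the use of startMiniCluster(1) for a single region server are assumptions, not taken from the test source):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;

    public class MiniClusterLifecycleSketch {
      private final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        // Brings up DFS, ZooKeeper, a master and one region server,
        // the single-RS layout this log was captured from.
        TEST_UTIL.startMiniCluster(1);
      }

      @After
      public void tearDown() throws Exception {
        // Produces the "Shutting down minicluster" / connection-close entries above.
        TEST_UTIL.shutdownMiniCluster();
      }
    }
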
2024-12-05T00:25:19,659 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-05T00:25:19,659 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1469019133, stopped=false 2024-12-05T00:25:19,659 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=2113c16e5528,35931,1733358224328 2024-12-05T00:25:19,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T00:25:19,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36827-0x10180016afe0001, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T00:25:19,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:19,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36827-0x10180016afe0001, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:19,662 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T00:25:19,662 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-05T00:25:19,662 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:25:19,662 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:25:19,662 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:25:19,662 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36827-0x10180016afe0001, quorum=127.0.0.1:51053, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:25:19,662 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2113c16e5528,36827,1733358225045' ***** 2024-12-05T00:25:19,662 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T00:25:19,663 INFO [RS:0;2113c16e5528:36827 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T00:25:19,663 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T00:25:19,663 INFO [RS:0;2113c16e5528:36827 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T00:25:19,663 INFO [RS:0;2113c16e5528:36827 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T00:25:19,664 INFO [RS:0;2113c16e5528:36827 {}] regionserver.HRegionServer(3091): Received CLOSE for 32f4c586ca315abab29a77f6fcad6119 2024-12-05T00:25:19,664 INFO [RS:0;2113c16e5528:36827 {}] regionserver.HRegionServer(959): stopping server 2113c16e5528,36827,1733358225045 2024-12-05T00:25:19,664 INFO [RS:0;2113c16e5528:36827 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T00:25:19,664 INFO [RS:0;2113c16e5528:36827 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;2113c16e5528:36827. 
2024-12-05T00:25:19,665 DEBUG [RS:0;2113c16e5528:36827 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:25:19,665 DEBUG [RS:0;2113c16e5528:36827 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:25:19,665 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 32f4c586ca315abab29a77f6fcad6119, disabling compactions & flushes 2024-12-05T00:25:19,665 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119. 2024-12-05T00:25:19,665 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119. 2024-12-05T00:25:19,665 INFO [RS:0;2113c16e5528:36827 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T00:25:19,665 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119. after waiting 0 ms 2024-12-05T00:25:19,665 INFO [RS:0;2113c16e5528:36827 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T00:25:19,665 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119. 2024-12-05T00:25:19,665 INFO [RS:0;2113c16e5528:36827 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-05T00:25:19,665 INFO [RS:0;2113c16e5528:36827 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-05T00:25:19,665 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 32f4c586ca315abab29a77f6fcad6119 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-12-05T00:25:19,666 INFO [RS:0;2113c16e5528:36827 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-05T00:25:19,666 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T00:25:19,666 DEBUG [RS:0;2113c16e5528:36827 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 32f4c586ca315abab29a77f6fcad6119=TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119.} 2024-12-05T00:25:19,666 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T00:25:19,666 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T00:25:19,666 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T00:25:19,666 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T00:25:19,666 DEBUG [RS:0;2113c16e5528:36827 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 32f4c586ca315abab29a77f6fcad6119 2024-12-05T00:25:19,666 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-12-05T00:25:19,672 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/.tmp/info/f7bbe16b82044262b637eee3da81c454 is 1080, key is row0029/info:/1733358313644/Put/seqid=0 2024-12-05T00:25:19,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741850_1026 (size=8193) 2024-12-05T00:25:19,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741850_1026 (size=8193) 2024-12-05T00:25:19,680 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/.tmp/info/f7bbe16b82044262b637eee3da81c454 2024-12-05T00:25:19,689 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/.tmp/info/f7bbe16b82044262b637eee3da81c454 as hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/info/f7bbe16b82044262b637eee3da81c454 2024-12-05T00:25:19,694 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/hbase/meta/1588230740/.tmp/info/da22daa72a3844f3a60b917a9b41fbbf is 195, key is TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119./info:regioninfo/1733358227590/Put/seqid=0 2024-12-05T00:25:19,697 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/info/f7bbe16b82044262b637eee3da81c454, entries=3, sequenceid=48, filesize=8.0 K 2024-12-05T00:25:19,698 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 32f4c586ca315abab29a77f6fcad6119 in 33ms, sequenceid=48, compaction requested=true 2024-12-05T00:25:19,702 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/info/15632f416b0f4b64a4b8ce2eab3437fa, hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/info/1e77a444346f4da2b5080b7ab658c738, hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/info/7701bec8a19a424eb278bcd9fb8ffaa7] to archive 2024-12-05T00:25:19,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741851_1027 (size=7016) 2024-12-05T00:25:19,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741851_1027 (size=7016) 2024-12-05T00:25:19,706 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
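The flush above writes the memstore to a file under .tmp and the HRegionFileSystem "Committing" entry then moves it into the store's info directory before it is reported as added. Stripped of HBase's bookkeeping, the underlying publish-by-rename idiom on a Hadoop FileSystem looks roughly like the sketch below; the class, method and path names are made up for illustration, and HBase's real commit goes through HRegionFileSystem rather than a bare rename.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpThenCommit {
        /** Write bytes to a temporary path, then publish them by renaming to the final path. */
        public static void writeAndCommit(FileSystem fs, byte[] payload,
                                          Path tmpFile, Path finalFile) throws java.io.IOException {
            try (FSDataOutputStream out = fs.create(tmpFile, true)) {
                out.write(payload);
            }
            // Readers only ever see the fully written file; a failed writer leaves junk in .tmp.
            if (!fs.rename(tmpFile, finalFile)) {
                throw new java.io.IOException("rename failed: " + tmpFile + " -> " + finalFile);
            }
        }

        public static void main(String[] args) throws Exception {
            FileSystem fs = FileSystem.get(new Configuration());
            writeAndCommit(fs, new byte[]{1, 2, 3},
                    new Path("/tmp/demo/.tmp/part-0"), new Path("/tmp/demo/info/part-0"));
        }
    }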
2024-12-05T00:25:19,706 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/hbase/meta/1588230740/.tmp/info/da22daa72a3844f3a60b917a9b41fbbf 2024-12-05T00:25:19,710 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/info/15632f416b0f4b64a4b8ce2eab3437fa to hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/archive/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/info/15632f416b0f4b64a4b8ce2eab3437fa 2024-12-05T00:25:19,712 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/info/1e77a444346f4da2b5080b7ab658c738 to hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/archive/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/info/1e77a444346f4da2b5080b7ab658c738 2024-12-05T00:25:19,714 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/info/7701bec8a19a424eb278bcd9fb8ffaa7 to hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/archive/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/info/7701bec8a19a424eb278bcd9fb8ffaa7 2024-12-05T00:25:19,731 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/hbase/meta/1588230740/.tmp/ns/bed272db55f9468f931c975aca46a31b is 43, key is default/ns:d/1733358226870/Put/seqid=0 2024-12-05T00:25:19,727 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=2113c16e5528:35931 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-12-05T00:25:19,731 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [15632f416b0f4b64a4b8ce2eab3437fa=12509, 1e77a444346f4da2b5080b7ab658c738=12509, 7701bec8a19a424eb278bcd9fb8ffaa7=12509] 2024-12-05T00:25:19,738 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/default/TestLogRolling-testSlowSyncLogRolling/32f4c586ca315abab29a77f6fcad6119/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-12-05T00:25:19,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741852_1028 (size=5153) 2024-12-05T00:25:19,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741852_1028 (size=5153) 2024-12-05T00:25:19,740 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/hbase/meta/1588230740/.tmp/ns/bed272db55f9468f931c975aca46a31b 2024-12-05T00:25:19,742 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119. 2024-12-05T00:25:19,742 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 32f4c586ca315abab29a77f6fcad6119: Waiting for close lock at 1733358319664Running coprocessor pre-close hooks at 1733358319665 (+1 ms)Disabling compacts and flushes for region at 1733358319665Disabling writes for close at 1733358319665Obtaining lock to block concurrent updates at 1733358319665Preparing flush snapshotting stores in 32f4c586ca315abab29a77f6fcad6119 at 1733358319665Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1733358319666 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119. at 1733358319667 (+1 ms)Flushing 32f4c586ca315abab29a77f6fcad6119/info: creating writer at 1733358319667Flushing 32f4c586ca315abab29a77f6fcad6119/info: appending metadata at 1733358319672 (+5 ms)Flushing 32f4c586ca315abab29a77f6fcad6119/info: closing flushed file at 1733358319672Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@44387c55: reopening flushed file at 1733358319688 (+16 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 32f4c586ca315abab29a77f6fcad6119 in 33ms, sequenceid=48, compaction requested=true at 1733358319698 (+10 ms)Writing region close event to WAL at 1733358319732 (+34 ms)Running coprocessor post-close hooks at 1733358319740 (+8 ms)Closed at 1733358319742 (+2 ms) 2024-12-05T00:25:19,743 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1733358227133.32f4c586ca315abab29a77f6fcad6119. 
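The "Region close journal" entry above is a single trail of named steps with absolute timestamps and "(+N ms)" deltas appended whenever time has advanced since the previous step. A toy recorder that produces the same kind of trail is sketched below; it only illustrates the formatting idea and is not HBase's monitored-task or close-journal implementation.

    import java.util.ArrayList;
    import java.util.List;

    public class StepJournal {
        private record Step(String what, long atMillis) {}
        private final List<Step> steps = new ArrayList<>();

        public void mark(String what) {
            steps.add(new Step(what, System.currentTimeMillis()));
        }

        /** Renders "step at T" entries, appending "(+delta ms)" when time moved forward. */
        @Override
        public String toString() {
            StringBuilder sb = new StringBuilder();
            long prev = -1;
            for (Step s : steps) {
                if (sb.length() > 0) sb.append(' ');
                sb.append(s.what()).append(" at ").append(s.atMillis());
                if (prev >= 0 && s.atMillis() > prev) {
                    sb.append(" (+").append(s.atMillis() - prev).append(" ms)");
                }
                prev = s.atMillis();
            }
            return sb.toString();
        }

        public static void main(String[] args) throws InterruptedException {
            StepJournal journal = new StepJournal();
            journal.mark("Waiting for close lock");
            journal.mark("Disabling writes for close");
            Thread.sleep(10);
            journal.mark("Writing region close event to WAL");
            System.out.println(journal);
        }
    }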
2024-12-05T00:25:19,767 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/hbase/meta/1588230740/.tmp/table/56c6a02883f84f2d9eb73cff3308fc8d is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1733358227606/Put/seqid=0 2024-12-05T00:25:19,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741853_1029 (size=5396) 2024-12-05T00:25:19,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741853_1029 (size=5396) 2024-12-05T00:25:19,866 DEBUG [RS:0;2113c16e5528:36827 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-05T00:25:20,067 DEBUG [RS:0;2113c16e5528:36827 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-05T00:25:20,166 INFO [regionserver/2113c16e5528:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-05T00:25:20,166 INFO [regionserver/2113c16e5528:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-05T00:25:20,169 INFO [regionserver/2113c16e5528:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T00:25:20,175 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/hbase/meta/1588230740/.tmp/table/56c6a02883f84f2d9eb73cff3308fc8d 2024-12-05T00:25:20,184 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/hbase/meta/1588230740/.tmp/info/da22daa72a3844f3a60b917a9b41fbbf as hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/hbase/meta/1588230740/info/da22daa72a3844f3a60b917a9b41fbbf 2024-12-05T00:25:20,191 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/hbase/meta/1588230740/info/da22daa72a3844f3a60b917a9b41fbbf, entries=10, sequenceid=11, filesize=6.9 K 2024-12-05T00:25:20,193 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/hbase/meta/1588230740/.tmp/ns/bed272db55f9468f931c975aca46a31b as hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/hbase/meta/1588230740/ns/bed272db55f9468f931c975aca46a31b 2024-12-05T00:25:20,199 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/hbase/meta/1588230740/ns/bed272db55f9468f931c975aca46a31b, entries=2, sequenceid=11, filesize=5.0 K 2024-12-05T00:25:20,201 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/hbase/meta/1588230740/.tmp/table/56c6a02883f84f2d9eb73cff3308fc8d as hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/hbase/meta/1588230740/table/56c6a02883f84f2d9eb73cff3308fc8d 2024-12-05T00:25:20,207 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/hbase/meta/1588230740/table/56c6a02883f84f2d9eb73cff3308fc8d, entries=2, sequenceid=11, filesize=5.3 K 2024-12-05T00:25:20,209 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 543ms, sequenceid=11, compaction requested=false 2024-12-05T00:25:20,214 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-05T00:25:20,215 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T00:25:20,215 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T00:25:20,216 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733358319666Running coprocessor pre-close hooks at 1733358319666Disabling compacts and flushes for region at 1733358319666Disabling writes for close at 1733358319666Obtaining lock to block concurrent updates at 1733358319666Preparing flush snapshotting stores in 1588230740 at 1733358319666Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1733358319667 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733358319667Flushing 1588230740/info: creating writer at 1733358319668 (+1 ms)Flushing 1588230740/info: appending metadata at 1733358319693 (+25 ms)Flushing 1588230740/info: closing flushed file at 1733358319693Flushing 1588230740/ns: creating writer at 1733358319714 (+21 ms)Flushing 1588230740/ns: appending metadata at 1733358319731 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1733358319731Flushing 1588230740/table: creating writer at 1733358319750 (+19 ms)Flushing 1588230740/table: appending metadata at 1733358319767 (+17 ms)Flushing 1588230740/table: closing flushed file at 1733358319767Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1c1eb70e: reopening flushed file at 1733358320182 (+415 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@33dfb2d5: reopening flushed file at 1733358320192 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@38d0909: reopening flushed file at 1733358320200 (+8 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 543ms, sequenceid=11, compaction requested=false at 1733358320209 (+9 ms)Writing region close event to WAL at 
1733358320210 (+1 ms)Running coprocessor post-close hooks at 1733358320215 (+5 ms)Closed at 1733358320215 2024-12-05T00:25:20,216 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-05T00:25:20,267 INFO [RS:0;2113c16e5528:36827 {}] regionserver.HRegionServer(976): stopping server 2113c16e5528,36827,1733358225045; all regions closed. 2024-12-05T00:25:20,269 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:20,269 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:20,269 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:20,269 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:20,269 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:20,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741834_1010 (size=3066) 2024-12-05T00:25:20,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741834_1010 (size=3066) 2024-12-05T00:25:20,276 DEBUG [RS:0;2113c16e5528:36827 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/oldWALs 2024-12-05T00:25:20,276 INFO [RS:0;2113c16e5528:36827 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2113c16e5528%2C36827%2C1733358225045.meta:.meta(num 1733358226722) 2024-12-05T00:25:20,276 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:20,276 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:20,277 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:20,277 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:20,277 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:20,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741847_1023 (size=12695) 2024-12-05T00:25:20,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741847_1023 (size=12695) 2024-12-05T00:25:20,282 DEBUG [RS:0;2113c16e5528:36827 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/oldWALs 2024-12-05T00:25:20,282 INFO [RS:0;2113c16e5528:36827 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2113c16e5528%2C36827%2C1733358225045:(num 1733358299617) 2024-12-05T00:25:20,282 DEBUG [RS:0;2113c16e5528:36827 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:25:20,282 INFO [RS:0;2113c16e5528:36827 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T00:25:20,282 INFO [RS:0;2113c16e5528:36827 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T00:25:20,283 INFO [RS:0;2113c16e5528:36827 {}] hbase.ChoreService(370): Chore service for: regionserver/2113c16e5528:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-05T00:25:20,283 INFO [RS:0;2113c16e5528:36827 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T00:25:20,283 INFO [regionserver/2113c16e5528:0.logRoller {}] 
wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-05T00:25:20,283 INFO [RS:0;2113c16e5528:36827 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36827 2024-12-05T00:25:20,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36827-0x10180016afe0001, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2113c16e5528,36827,1733358225045 2024-12-05T00:25:20,287 INFO [RS:0;2113c16e5528:36827 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T00:25:20,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T00:25:20,289 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2113c16e5528,36827,1733358225045] 2024-12-05T00:25:20,290 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2113c16e5528,36827,1733358225045 already deleted, retry=false 2024-12-05T00:25:20,291 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2113c16e5528,36827,1733358225045 expired; onlineServers=0 2024-12-05T00:25:20,291 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '2113c16e5528,35931,1733358224328' ***** 2024-12-05T00:25:20,291 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-05T00:25:20,291 INFO [M:0;2113c16e5528:35931 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T00:25:20,291 INFO [M:0;2113c16e5528:35931 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T00:25:20,291 DEBUG [M:0;2113c16e5528:35931 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-05T00:25:20,291 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
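The NodeDeleted event for /hbase/rs/2113c16e5528,36827,... above fires because the region server's registration znode is ephemeral: closing the ZooKeeper session removes it, which is what RegionServerTracker then processes as server expiration. A minimal sketch of that ephemeral-registration pattern with the plain client is below; the server name, quorum address and class name are illustrative, and the parent znodes are assumed to exist already.

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralRegistration {
        public static void main(String[] args) throws Exception {
            ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });
            // An ephemeral znode lives only as long as this session; when the process closes
            // the session (or dies), ZooKeeper deletes the node and any watcher, such as a
            // region-server tracker, observes a NodeDeleted event for the path.
            String path = zk.create("/hbase/rs/demo-server,16020,0", new byte[0],
                    ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
            System.out.println("registered " + path);
            zk.close(); // triggers NodeDeleted for the path above
        }
    }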
2024-12-05T00:25:20,291 DEBUG [M:0;2113c16e5528:35931 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-05T00:25:20,291 DEBUG [master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.small.0-1733358226003 {}] cleaner.HFileCleaner(306): Exit Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.small.0-1733358226003,5,FailOnTimeoutGroup] 2024-12-05T00:25:20,291 DEBUG [master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.large.0-1733358226002 {}] cleaner.HFileCleaner(306): Exit Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.large.0-1733358226002,5,FailOnTimeoutGroup] 2024-12-05T00:25:20,292 INFO [M:0;2113c16e5528:35931 {}] hbase.ChoreService(370): Chore service for: master/2113c16e5528:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-05T00:25:20,292 INFO [M:0;2113c16e5528:35931 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T00:25:20,292 DEBUG [M:0;2113c16e5528:35931 {}] master.HMaster(1795): Stopping service threads 2024-12-05T00:25:20,292 INFO [M:0;2113c16e5528:35931 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-05T00:25:20,292 INFO [M:0;2113c16e5528:35931 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T00:25:20,292 INFO [M:0;2113c16e5528:35931 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-05T00:25:20,292 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-05T00:25:20,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-05T00:25:20,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:20,294 DEBUG [M:0;2113c16e5528:35931 {}] zookeeper.ZKUtil(347): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-05T00:25:20,294 WARN [M:0;2113c16e5528:35931 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-05T00:25:20,295 INFO [M:0;2113c16e5528:35931 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/.lastflushedseqids 2024-12-05T00:25:20,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741854_1030 (size=130) 2024-12-05T00:25:20,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741854_1030 (size=130) 2024-12-05T00:25:20,307 INFO [M:0;2113c16e5528:35931 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-05T00:25:20,307 INFO [M:0;2113c16e5528:35931 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-05T00:25:20,308 DEBUG [M:0;2113c16e5528:35931 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T00:25:20,308 INFO [M:0;2113c16e5528:35931 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:25:20,308 DEBUG [M:0;2113c16e5528:35931 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:25:20,308 DEBUG [M:0;2113c16e5528:35931 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T00:25:20,308 DEBUG [M:0;2113c16e5528:35931 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:25:20,308 INFO [M:0;2113c16e5528:35931 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-12-05T00:25:20,326 DEBUG [M:0;2113c16e5528:35931 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/20c14ce83df14307b8bb66255c27af59 is 82, key is hbase:meta,,1/info:regioninfo/1733358226796/Put/seqid=0 2024-12-05T00:25:20,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741855_1031 (size=5672) 2024-12-05T00:25:20,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741855_1031 (size=5672) 2024-12-05T00:25:20,333 INFO [M:0;2113c16e5528:35931 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/20c14ce83df14307b8bb66255c27af59 2024-12-05T00:25:20,356 DEBUG [M:0;2113c16e5528:35931 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fbc984036f2141feb57e161908839ece is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733358227613/Put/seqid=0 2024-12-05T00:25:20,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741856_1032 (size=6247) 2024-12-05T00:25:20,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741856_1032 (size=6247) 2024-12-05T00:25:20,362 INFO [M:0;2113c16e5528:35931 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fbc984036f2141feb57e161908839ece 2024-12-05T00:25:20,369 INFO [M:0;2113c16e5528:35931 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for fbc984036f2141feb57e161908839ece 2024-12-05T00:25:20,385 DEBUG [M:0;2113c16e5528:35931 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/edbefbb30dd84330b040f19315f6a460 is 69, key is 2113c16e5528,36827,1733358225045/rs:state/1733358226083/Put/seqid=0 2024-12-05T00:25:20,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36827-0x10180016afe0001, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:25:20,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36827-0x10180016afe0001, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:25:20,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741857_1033 (size=5156) 2024-12-05T00:25:20,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741857_1033 (size=5156) 2024-12-05T00:25:20,390 INFO [RS:0;2113c16e5528:36827 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T00:25:20,391 INFO [RS:0;2113c16e5528:36827 {}] regionserver.HRegionServer(1031): Exiting; stopping=2113c16e5528,36827,1733358225045; zookeeper connection closed. 2024-12-05T00:25:20,391 INFO [M:0;2113c16e5528:35931 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/edbefbb30dd84330b040f19315f6a460 2024-12-05T00:25:20,391 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@38268766 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@38268766 2024-12-05T00:25:20,391 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-05T00:25:20,411 DEBUG [M:0;2113c16e5528:35931 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6202afbf01594b54b4e502a70980e3d8 is 52, key is load_balancer_on/state:d/1733358227112/Put/seqid=0 2024-12-05T00:25:20,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741858_1034 (size=5056) 2024-12-05T00:25:20,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741858_1034 (size=5056) 2024-12-05T00:25:20,418 INFO [M:0;2113c16e5528:35931 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6202afbf01594b54b4e502a70980e3d8 2024-12-05T00:25:20,425 DEBUG [M:0;2113c16e5528:35931 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/20c14ce83df14307b8bb66255c27af59 as 
hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/20c14ce83df14307b8bb66255c27af59 2024-12-05T00:25:20,431 INFO [M:0;2113c16e5528:35931 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/20c14ce83df14307b8bb66255c27af59, entries=8, sequenceid=59, filesize=5.5 K 2024-12-05T00:25:20,433 DEBUG [M:0;2113c16e5528:35931 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fbc984036f2141feb57e161908839ece as hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fbc984036f2141feb57e161908839ece 2024-12-05T00:25:20,439 INFO [M:0;2113c16e5528:35931 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for fbc984036f2141feb57e161908839ece 2024-12-05T00:25:20,439 INFO [M:0;2113c16e5528:35931 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fbc984036f2141feb57e161908839ece, entries=6, sequenceid=59, filesize=6.1 K 2024-12-05T00:25:20,440 DEBUG [M:0;2113c16e5528:35931 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/edbefbb30dd84330b040f19315f6a460 as hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/edbefbb30dd84330b040f19315f6a460 2024-12-05T00:25:20,448 INFO [M:0;2113c16e5528:35931 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/edbefbb30dd84330b040f19315f6a460, entries=1, sequenceid=59, filesize=5.0 K 2024-12-05T00:25:20,449 DEBUG [M:0;2113c16e5528:35931 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6202afbf01594b54b4e502a70980e3d8 as hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/6202afbf01594b54b4e502a70980e3d8 2024-12-05T00:25:20,456 INFO [M:0;2113c16e5528:35931 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/6202afbf01594b54b4e502a70980e3d8, entries=1, sequenceid=59, filesize=4.9 K 2024-12-05T00:25:20,458 INFO [M:0;2113c16e5528:35931 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 150ms, sequenceid=59, compaction requested=false 2024-12-05T00:25:20,459 INFO [M:0;2113c16e5528:35931 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-05T00:25:20,460 DEBUG [M:0;2113c16e5528:35931 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733358320308Disabling compacts and flushes for region at 1733358320308Disabling writes for close at 1733358320308Obtaining lock to block concurrent updates at 1733358320308Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733358320308Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1733358320309 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733358320310 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733358320310Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733358320326 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733358320326Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733358320340 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733358320355 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733358320355Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733358320369 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733358320384 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733358320384Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733358320397 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733358320411 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733358320411Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@cc66c9e: reopening flushed file at 1733358320424 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@d7cbcd5: reopening flushed file at 1733358320431 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7fca70ed: reopening flushed file at 1733358320439 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@12f40d00: reopening flushed file at 1733358320448 (+9 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 150ms, sequenceid=59, compaction requested=false at 1733358320458 (+10 ms)Writing region close event to WAL at 1733358320459 (+1 ms)Closed at 1733358320459 2024-12-05T00:25:20,460 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:20,461 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:20,461 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:20,461 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:20,461 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:20,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40023 is added to blk_1073741830_1006 (size=27973) 2024-12-05T00:25:20,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741830_1006 (size=27973) 2024-12-05T00:25:20,464 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
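The repeated "sync.N ... interrupted" lines above, together with the FSHLog$SyncRunner stacks blocked in LinkedBlockingQueue.take later in this log, show WAL sync workers being interrupted out of a blocking take during shutdown. A generic sketch of that interruptible-worker pattern is below; the thread name and queue contents are illustrative, and this is not the FSHLog code itself.

    import java.util.concurrent.LinkedBlockingQueue;

    public class InterruptibleSyncWorker {
        public static void main(String[] args) throws Exception {
            LinkedBlockingQueue<Runnable> requests = new LinkedBlockingQueue<>();
            Thread worker = new Thread(() -> {
                try {
                    while (true) {
                        requests.take().run();   // blocks until a request arrives
                    }
                } catch (InterruptedException e) {
                    // Mirrors the "sync.N ... interrupted" entries: on shutdown the blocked
                    // worker is interrupted, notes it, and exits its loop.
                    System.out.println("interrupted");
                }
            }, "sync.0");
            worker.start();
            requests.put(() -> System.out.println("synced one request"));
            Thread.sleep(100);      // let the worker drain the queue
            worker.interrupt();     // shutdown path
            worker.join();
        }
    }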
2024-12-05T00:25:20,464 INFO [M:0;2113c16e5528:35931 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-05T00:25:20,464 INFO [M:0;2113c16e5528:35931 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35931 2024-12-05T00:25:20,465 INFO [M:0;2113c16e5528:35931 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T00:25:20,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:25:20,567 INFO [M:0;2113c16e5528:35931 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T00:25:20,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35931-0x10180016afe0000, quorum=127.0.0.1:51053, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:25:20,572 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:25:20,574 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:25:20,574 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:25:20,574 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:25:20,574 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/hadoop.log.dir/,STOPPED} 2024-12-05T00:25:20,578 WARN [BP-1977460799-172.17.0.2-1733358221383 heartbeating to localhost/127.0.0.1:45273 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T00:25:20,578 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T00:25:20,578 WARN [BP-1977460799-172.17.0.2-1733358221383 heartbeating to localhost/127.0.0.1:45273 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1977460799-172.17.0.2-1733358221383 (Datanode Uuid fe788050-3610-4b50-a546-99e744c8ae07) service to localhost/127.0.0.1:45273 2024-12-05T00:25:20,578 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T00:25:20,580 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/cluster_a63b60db-13ec-cfe8-9570-22099ec66976/data/data3/current/BP-1977460799-172.17.0.2-1733358221383 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:25:20,580 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/cluster_a63b60db-13ec-cfe8-9570-22099ec66976/data/data4/current/BP-1977460799-172.17.0.2-1733358221383 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:25:20,580 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T00:25:20,582 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:25:20,583 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:25:20,583 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:25:20,583 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:25:20,583 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/hadoop.log.dir/,STOPPED} 2024-12-05T00:25:20,585 WARN [BP-1977460799-172.17.0.2-1733358221383 heartbeating to localhost/127.0.0.1:45273 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T00:25:20,585 WARN [BP-1977460799-172.17.0.2-1733358221383 heartbeating to localhost/127.0.0.1:45273 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1977460799-172.17.0.2-1733358221383 (Datanode Uuid 2553d511-29bd-451d-987a-25b7a501f835) service to localhost/127.0.0.1:45273 2024-12-05T00:25:20,588 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T00:25:20,589 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T00:25:20,589 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/cluster_a63b60db-13ec-cfe8-9570-22099ec66976/data/data1/current/BP-1977460799-172.17.0.2-1733358221383 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:25:20,589 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/cluster_a63b60db-13ec-cfe8-9570-22099ec66976/data/data2/current/BP-1977460799-172.17.0.2-1733358221383 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:25:20,589 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T00:25:20,599 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-05T00:25:20,600 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:25:20,600 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:25:20,600 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:25:20,601 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/hadoop.log.dir/,STOPPED} 2024-12-05T00:25:20,610 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-05T00:25:20,641 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-05T00:25:20,651 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45273 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45273 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45273 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) 
connection to localhost/127.0.0.1:45273 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45273 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:45273 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/2113c16e5528:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/2113c16e5528:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: master/2113c16e5528:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45273 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45273 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@262d32c9 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=404 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=277 (was 320), ProcessCount=11 (was 11), AvailableMemoryMB=10028 (was 9361) - AvailableMemoryMB LEAK? -
2024-12-05T00:25:20,659 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=404, MaxFileDescriptor=1048576, SystemLoadAverage=277, ProcessCount=11, AvailableMemoryMB=10028
2024-12-05T00:25:20,659 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-05T00:25:20,659 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/hadoop.log.dir so I do NOT create it in target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86
2024-12-05T00:25:20,659 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/703f8370-3f4b-8740-767a-d662c6a0d9fc/hadoop.tmp.dir so I do NOT create it in target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86
2024-12-05T00:25:20,659 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/cluster_f7f2c3ab-9744-9fc0-c1d0-7d1481f05e7a, deleteOnExit=true
2024-12-05T00:25:20,659 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-12-05T00:25:20,660 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/test.cache.data in system properties and HBase conf
2024-12-05T00:25:20,660 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/hadoop.tmp.dir in system properties and HBase conf
2024-12-05T00:25:20,660 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/hadoop.log.dir in system properties and HBase conf
2024-12-05T00:25:20,660 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-05T00:25:20,660 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-05T00:25:20,660 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-05T00:25:20,661 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-05T00:25:20,661 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-05T00:25:20,661 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-05T00:25:20,661 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-05T00:25:20,661 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-05T00:25:20,662 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-05T00:25:20,662 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-05T00:25:20,662 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-05T00:25:20,662 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-05T00:25:20,662 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-05T00:25:20,662 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/nfs.dump.dir in system properties and HBase conf
2024-12-05T00:25:20,662 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/java.io.tmpdir in system properties and HBase conf
2024-12-05T00:25:20,662 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-05T00:25:20,662 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-05T00:25:20,662 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-05T00:25:20,682 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-05T00:25:20,777 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-05T00:25:20,785 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-05T00:25:20,791 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-05T00:25:20,791 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-05T00:25:20,791 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-05T00:25:20,792 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-05T00:25:20,793 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3197ca45{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/hadoop.log.dir/,AVAILABLE}
2024-12-05T00:25:20,793 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45e3157d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-05T00:25:20,915 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@511dc70f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/java.io.tmpdir/jetty-localhost-35295-hadoop-hdfs-3_4_1-tests_jar-_-any-7431162715316518105/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-05T00:25:20,916 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3e469283{HTTP/1.1, (http/1.1)}{localhost:35295}
2024-12-05T00:25:20,916 INFO [Time-limited test {}] server.Server(415): Started @101543ms
2024-12-05T00:25:20,930 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-05T00:25:21,041 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-05T00:25:21,046 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-05T00:25:21,047 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-05T00:25:21,048 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-05T00:25:21,048 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-05T00:25:21,048 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4edee9ab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/hadoop.log.dir/,AVAILABLE}
2024-12-05T00:25:21,049 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@276f8783{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-05T00:25:21,192 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5d4bdc00{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/java.io.tmpdir/jetty-localhost-46575-hadoop-hdfs-3_4_1-tests_jar-_-any-1448573347258982700/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-05T00:25:21,192 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@687b21ce{HTTP/1.1, (http/1.1)}{localhost:46575}
2024-12-05T00:25:21,192 INFO [Time-limited test {}] server.Server(415): Started @101820ms
2024-12-05T00:25:21,195 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-05T00:25:21,239 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-05T00:25:21,244 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-05T00:25:21,246 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-05T00:25:21,246 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-05T00:25:21,246 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-05T00:25:21,247 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b44e274{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/hadoop.log.dir/,AVAILABLE}
2024-12-05T00:25:21,247 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@376d199b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-05T00:25:21,315 WARN [Thread-437 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/cluster_f7f2c3ab-9744-9fc0-c1d0-7d1481f05e7a/data/data1/current/BP-1998844685-172.17.0.2-1733358320704/current, will proceed with Du for space computation calculation,
2024-12-05T00:25:21,316 WARN [Thread-438 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/cluster_f7f2c3ab-9744-9fc0-c1d0-7d1481f05e7a/data/data2/current/BP-1998844685-172.17.0.2-1733358320704/current, will proceed with Du for space computation calculation,
2024-12-05T00:25:21,358 WARN [Thread-416 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec.
Assuming default value of -1 2024-12-05T00:25:21,363 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x499cfb5ea7406826 with lease ID 0xc07be51a1674688a: Processing first storage report for DS-af3bd07d-3c09-41fd-b69c-ee9ae7542f9f from datanode DatanodeRegistration(127.0.0.1:45623, datanodeUuid=dcec96bf-fe24-4f43-9d8f-8fdd8e36515b, infoPort=40895, infoSecurePort=0, ipcPort=39959, storageInfo=lv=-57;cid=testClusterID;nsid=1518095144;c=1733358320704) 2024-12-05T00:25:21,363 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x499cfb5ea7406826 with lease ID 0xc07be51a1674688a: from storage DS-af3bd07d-3c09-41fd-b69c-ee9ae7542f9f node DatanodeRegistration(127.0.0.1:45623, datanodeUuid=dcec96bf-fe24-4f43-9d8f-8fdd8e36515b, infoPort=40895, infoSecurePort=0, ipcPort=39959, storageInfo=lv=-57;cid=testClusterID;nsid=1518095144;c=1733358320704), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:25:21,363 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x499cfb5ea7406826 with lease ID 0xc07be51a1674688a: Processing first storage report for DS-ded170eb-7963-41ea-a02e-ad4834a80e44 from datanode DatanodeRegistration(127.0.0.1:45623, datanodeUuid=dcec96bf-fe24-4f43-9d8f-8fdd8e36515b, infoPort=40895, infoSecurePort=0, ipcPort=39959, storageInfo=lv=-57;cid=testClusterID;nsid=1518095144;c=1733358320704) 2024-12-05T00:25:21,363 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x499cfb5ea7406826 with lease ID 0xc07be51a1674688a: from storage DS-ded170eb-7963-41ea-a02e-ad4834a80e44 node DatanodeRegistration(127.0.0.1:45623, datanodeUuid=dcec96bf-fe24-4f43-9d8f-8fdd8e36515b, infoPort=40895, infoSecurePort=0, ipcPort=39959, storageInfo=lv=-57;cid=testClusterID;nsid=1518095144;c=1733358320704), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:25:21,400 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@824b6ae{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/java.io.tmpdir/jetty-localhost-44265-hadoop-hdfs-3_4_1-tests_jar-_-any-13167040415287790833/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:25:21,401 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5d7e5c06{HTTP/1.1, (http/1.1)}{localhost:44265} 2024-12-05T00:25:21,401 INFO [Time-limited test {}] server.Server(415): Started @102028ms 2024-12-05T00:25:21,403 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
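The records up to this point show the test harness bringing up an embedded HDFS cluster: Jetty-backed web contexts for the namenode and datanodes, block-pool initialization under the test data directory, and the first block reports being processed. Purely as a point of reference, a minimal sketch of starting a comparable two-DataNode MiniDFSCluster with the Hadoop test API (from the hadoop-hdfs tests jar) might look like the following; the base directory is a made-up placeholder, not a path from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniDfsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Keep test data under a throwaway directory (hypothetical path).
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/minidfs-sketch");

        // Two DataNodes, matching the pair of block reports seen in the log.
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(2)
            .build();
        cluster.waitActive();          // block until NameNode and DataNodes have reported in
        try {
          FileSystem fs = cluster.getFileSystem();
          System.out.println("NameNode URI: " + fs.getUri());
        } finally {
          cluster.shutdown();          // tears down DataNodes, NameNode and their Jetty UIs
        }
      }
    }
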
2024-12-05T00:25:21,548 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/cluster_f7f2c3ab-9744-9fc0-c1d0-7d1481f05e7a/data/data3/current/BP-1998844685-172.17.0.2-1733358320704/current, will proceed with Du for space computation calculation, 2024-12-05T00:25:21,548 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/cluster_f7f2c3ab-9744-9fc0-c1d0-7d1481f05e7a/data/data4/current/BP-1998844685-172.17.0.2-1733358320704/current, will proceed with Du for space computation calculation, 2024-12-05T00:25:21,581 WARN [Thread-452 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-05T00:25:21,585 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x80130bb6b8ffd111 with lease ID 0xc07be51a1674688b: Processing first storage report for DS-a064f017-3055-41d7-8ce4-782fe5123716 from datanode DatanodeRegistration(127.0.0.1:42699, datanodeUuid=e529cebd-dfdc-4ca8-b4bc-a0cc83f5208b, infoPort=35665, infoSecurePort=0, ipcPort=46445, storageInfo=lv=-57;cid=testClusterID;nsid=1518095144;c=1733358320704) 2024-12-05T00:25:21,585 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x80130bb6b8ffd111 with lease ID 0xc07be51a1674688b: from storage DS-a064f017-3055-41d7-8ce4-782fe5123716 node DatanodeRegistration(127.0.0.1:42699, datanodeUuid=e529cebd-dfdc-4ca8-b4bc-a0cc83f5208b, infoPort=35665, infoSecurePort=0, ipcPort=46445, storageInfo=lv=-57;cid=testClusterID;nsid=1518095144;c=1733358320704), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:25:21,585 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x80130bb6b8ffd111 with lease ID 0xc07be51a1674688b: Processing first storage report for DS-4d99fdc5-f684-4341-acb0-ee94c659fe56 from datanode DatanodeRegistration(127.0.0.1:42699, datanodeUuid=e529cebd-dfdc-4ca8-b4bc-a0cc83f5208b, infoPort=35665, infoSecurePort=0, ipcPort=46445, storageInfo=lv=-57;cid=testClusterID;nsid=1518095144;c=1733358320704) 2024-12-05T00:25:21,585 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x80130bb6b8ffd111 with lease ID 0xc07be51a1674688b: from storage DS-4d99fdc5-f684-4341-acb0-ee94c659fe56 node DatanodeRegistration(127.0.0.1:42699, datanodeUuid=e529cebd-dfdc-4ca8-b4bc-a0cc83f5208b, infoPort=35665, infoSecurePort=0, ipcPort=46445, storageInfo=lv=-57;cid=testClusterID;nsid=1518095144;c=1733358320704), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:25:21,653 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86 2024-12-05T00:25:21,659 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/cluster_f7f2c3ab-9744-9fc0-c1d0-7d1481f05e7a/zookeeper_0, clientPort=49244, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/cluster_f7f2c3ab-9744-9fc0-c1d0-7d1481f05e7a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/cluster_f7f2c3ab-9744-9fc0-c1d0-7d1481f05e7a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-05T00:25:21,661 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49244 2024-12-05T00:25:21,661 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:25:21,663 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:25:21,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741825_1001 (size=7) 2024-12-05T00:25:21,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42699 is added to blk_1073741825_1001 (size=7) 2024-12-05T00:25:21,686 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d with version=8 2024-12-05T00:25:21,686 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/hbase-staging 2024-12-05T00:25:21,689 INFO [Time-limited test {}] client.ConnectionUtils(128): master/2113c16e5528:0 server-side Connection retries=45 2024-12-05T00:25:21,690 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:25:21,690 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T00:25:21,690 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T00:25:21,690 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:25:21,690 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T00:25:21,690 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-05T00:25:21,690 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T00:25:21,693 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44907 2024-12-05T00:25:21,694 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44907 connecting to ZooKeeper ensemble=127.0.0.1:49244 2024-12-05T00:25:21,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:449070x0, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T00:25:21,706 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44907-0x1018002ea890000 connected 2024-12-05T00:25:21,734 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:25:21,737 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:25:21,741 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:25:21,741 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d, hbase.cluster.distributed=false 2024-12-05T00:25:21,743 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T00:25:21,746 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44907 2024-12-05T00:25:21,746 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44907 2024-12-05T00:25:21,748 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44907 2024-12-05T00:25:21,752 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44907 2024-12-05T00:25:21,755 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44907 2024-12-05T00:25:21,782 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2113c16e5528:0 server-side Connection retries=45 2024-12-05T00:25:21,782 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:25:21,782 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T00:25:21,782 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T00:25:21,782 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:25:21,782 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T00:25:21,782 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T00:25:21,783 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T00:25:21,784 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42255 2024-12-05T00:25:21,786 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42255 connecting to ZooKeeper ensemble=127.0.0.1:49244 2024-12-05T00:25:21,787 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:25:21,790 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:25:21,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:422550x0, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T00:25:21,798 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:422550x0, quorum=127.0.0.1:49244, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:25:21,798 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-05T00:25:21,801 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42255-0x1018002ea890001 connected 2024-12-05T00:25:21,804 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T00:25:21,805 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42255-0x1018002ea890001, quorum=127.0.0.1:49244, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T00:25:21,807 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42255-0x1018002ea890001, quorum=127.0.0.1:49244, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T00:25:21,812 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42255 2024-12-05T00:25:21,813 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42255 2024-12-05T00:25:21,815 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42255 2024-12-05T00:25:21,821 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42255 2024-12-05T00:25:21,821 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42255 2024-12-05T00:25:21,842 
DEBUG [M:0;2113c16e5528:44907 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;2113c16e5528:44907 2024-12-05T00:25:21,844 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/2113c16e5528,44907,1733358321689 2024-12-05T00:25:21,847 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42255-0x1018002ea890001, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:25:21,847 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:25:21,847 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/2113c16e5528,44907,1733358321689 2024-12-05T00:25:21,850 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:21,853 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-05T00:25:21,854 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/2113c16e5528,44907,1733358321689 from backup master directory 2024-12-05T00:25:21,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42255-0x1018002ea890001, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T00:25:21,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42255-0x1018002ea890001, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:21,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/2113c16e5528,44907,1733358321689 2024-12-05T00:25:21,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:25:21,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42255-0x1018002ea890001, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:25:21,859 WARN [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
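In the records above, the master and region server each open a ZooKeeper session against the MiniZooKeeperCluster ensemble (127.0.0.1:49244 in this run) and set watchers on znodes under /hbase, including ones that do not exist yet. For illustration only, a bare ZooKeeper client doing the same kind of watch-and-list could be sketched as below; the port is taken from the log, while the znode paths are the standard /hbase layout and should be treated as assumptions:

    import java.util.List;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;

    public class ZkWatchSketch {
      public static void main(String[] args) throws Exception {
        // Session against the embedded ensemble; 49244 is the clientPort reported above.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:49244", 30_000,
            (WatchedEvent event) -> System.out.println("event: " + event));
        try {
          // Watch a znode that may not exist yet, as ZKUtil does for /hbase/running.
          zk.exists("/hbase/running", true);
          // List children of the base znode and register a watch on it.
          List<String> children = zk.getChildren("/hbase", true);
          System.out.println("children of /hbase: " + children);
        } finally {
          zk.close();
        }
      }
    }
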
2024-12-05T00:25:21,860 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=2113c16e5528,44907,1733358321689 2024-12-05T00:25:21,871 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/hbase.id] with ID: 4dd59478-282c-44c0-b43a-32d02bd2e7f3 2024-12-05T00:25:21,871 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/.tmp/hbase.id 2024-12-05T00:25:21,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741826_1002 (size=42) 2024-12-05T00:25:21,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42699 is added to blk_1073741826_1002 (size=42) 2024-12-05T00:25:21,893 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/.tmp/hbase.id]:[hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/hbase.id] 2024-12-05T00:25:21,911 INFO [master/2113c16e5528:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:25:21,911 INFO [master/2113c16e5528:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-05T00:25:21,913 INFO [master/2113c16e5528:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
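The cluster ID records above show FSUtils writing hbase.id to a temporary location and then moving it into place, so readers never observe a half-written file. A stripped-down version of that write-then-rename pattern with the Hadoop FileSystem API is sketched here; the paths and the UUID are placeholders rather than values from this run:

    import java.nio.charset.StandardCharsets;
    import java.util.UUID;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();     // picks up fs.defaultFS from the environment
        FileSystem fs = FileSystem.get(conf);

        Path tmp = new Path("/example-rootdir/.tmp/hbase.id");   // hypothetical temp location
        Path target = new Path("/example-rootdir/hbase.id");     // hypothetical final location

        // Write the ID somewhere readers do not look first...
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
        }
        // ...then publish it under its final name with a single rename.
        if (!fs.rename(tmp, target)) {
          throw new IllegalStateException("rename failed: " + tmp + " -> " + target);
        }
      }
    }
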
2024-12-05T00:25:21,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42255-0x1018002ea890001, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:21,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:21,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741827_1003 (size=196) 2024-12-05T00:25:21,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42699 is added to blk_1073741827_1003 (size=196) 2024-12-05T00:25:22,336 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T00:25:22,337 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-05T00:25:22,337 INFO [master/2113c16e5528:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T00:25:22,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42699 is added to blk_1073741828_1004 (size=1189) 2024-12-05T00:25:22,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741828_1004 (size=1189) 2024-12-05T00:25:22,349 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/MasterData/data/master/store 2024-12-05T00:25:22,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741829_1005 (size=34) 2024-12-05T00:25:22,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42699 is added to blk_1073741829_1005 (size=34) 2024-12-05T00:25:22,359 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:25:22,359 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T00:25:22,359 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:25:22,359 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:25:22,359 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T00:25:22,359 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:25:22,359 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
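The master:store descriptor printed above is built from four column families whose attributes (VERSIONS, BLOOMFILTER, IN_MEMORY, DATA_BLOCK_ENCODING, BLOCKSIZE) map directly onto the public ColumnFamilyDescriptorBuilder setters. As a rough illustration, assembling a descriptor with an 'info'-like family through the HBase client API could look like this; the table name is a made-up example, since 'master:store' itself is internal to the master:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static TableDescriptor build() {
        // Mirrors the 'info' family attributes printed in the log.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)
            .build();

        // Hypothetical table name; the real region belongs to the master-only 'master:store' table.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example_store"))
            .setColumnFamily(info)
            .build();
      }
    }
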
2024-12-05T00:25:22,360 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733358322359Disabling compacts and flushes for region at 1733358322359Disabling writes for close at 1733358322359Writing region close event to WAL at 1733358322359Closed at 1733358322359 2024-12-05T00:25:22,361 WARN [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/MasterData/data/master/store/.initializing 2024-12-05T00:25:22,362 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/MasterData/WALs/2113c16e5528,44907,1733358321689 2024-12-05T00:25:22,365 INFO [master/2113c16e5528:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2113c16e5528%2C44907%2C1733358321689, suffix=, logDir=hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/MasterData/WALs/2113c16e5528,44907,1733358321689, archiveDir=hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/MasterData/oldWALs, maxLogs=10 2024-12-05T00:25:22,366 INFO [master/2113c16e5528:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C44907%2C1733358321689.1733358322366 2024-12-05T00:25:22,376 INFO [master/2113c16e5528:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/MasterData/WALs/2113c16e5528,44907,1733358321689/2113c16e5528%2C44907%2C1733358321689.1733358322366 2024-12-05T00:25:22,377 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35665:35665),(127.0.0.1/127.0.0.1:40895:40895)] 2024-12-05T00:25:22,377 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-05T00:25:22,378 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:25:22,378 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:25:22,378 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:25:22,379 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:25:22,381 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-05T00:25:22,381 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:25:22,382 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:25:22,382 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:25:22,384 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-05T00:25:22,384 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:25:22,384 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:25:22,385 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:25:22,387 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-05T00:25:22,388 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:25:22,388 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:25:22,388 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:25:22,390 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-05T00:25:22,390 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:25:22,390 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:25:22,391 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:25:22,392 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:25:22,392 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:25:22,394 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:25:22,394 DEBUG [master/2113c16e5528:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:25:22,395 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T00:25:22,396 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:25:22,399 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T00:25:22,399 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=744545, jitterRate=-0.0532626211643219}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T00:25:22,401 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733358322378Initializing all the Stores at 1733358322379 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358322379Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358322379Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358322379Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358322379Cleaning up temporary data from old regions at 1733358322394 (+15 ms)Region opened successfully at 1733358322401 (+7 ms) 2024-12-05T00:25:22,401 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-05T00:25:22,406 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f2e7564, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2113c16e5528/172.17.0.2:0 2024-12-05T00:25:22,407 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-05T00:25:22,407 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-05T00:25:22,407 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-05T00:25:22,408 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-05T00:25:22,408 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-05T00:25:22,409 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-05T00:25:22,409 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-05T00:25:22,415 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-05T00:25:22,416 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-05T00:25:22,417 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-05T00:25:22,418 INFO [master/2113c16e5528:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-05T00:25:22,418 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-05T00:25:22,420 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-05T00:25:22,420 INFO [master/2113c16e5528:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-05T00:25:22,421 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-05T00:25:22,424 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-05T00:25:22,425 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-05T00:25:22,426 DEBUG 
[master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-05T00:25:22,428 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-05T00:25:22,429 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-05T00:25:22,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T00:25:22,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42255-0x1018002ea890001, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T00:25:22,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42255-0x1018002ea890001, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:22,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:22,431 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=2113c16e5528,44907,1733358321689, sessionid=0x1018002ea890000, setting cluster-up flag (Was=false) 2024-12-05T00:25:22,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:22,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42255-0x1018002ea890001, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:22,443 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-05T00:25:22,445 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2113c16e5528,44907,1733358321689 2024-12-05T00:25:22,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42255-0x1018002ea890001, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:22,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:22,457 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-05T00:25:22,459 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2113c16e5528,44907,1733358321689 2024-12-05T00:25:22,460 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-05T00:25:22,463 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-05T00:25:22,463 INFO [master/2113c16e5528:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-05T00:25:22,463 INFO [master/2113c16e5528:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-05T00:25:22,464 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 2113c16e5528,44907,1733358321689 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-05T00:25:22,466 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:25:22,466 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:25:22,466 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:25:22,466 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:25:22,466 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/2113c16e5528:0, corePoolSize=10, maxPoolSize=10 2024-12-05T00:25:22,466 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:22,466 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/2113c16e5528:0, corePoolSize=2, maxPoolSize=2 2024-12-05T00:25:22,466 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/2113c16e5528:0, corePoolSize=1, 
maxPoolSize=1 2024-12-05T00:25:22,469 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733358352469 2024-12-05T00:25:22,469 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-05T00:25:22,469 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-05T00:25:22,469 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-05T00:25:22,469 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-05T00:25:22,469 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-05T00:25:22,469 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-05T00:25:22,470 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:22,472 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-05T00:25:22,473 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-05T00:25:22,473 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-05T00:25:22,474 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:25:22,474 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-05T00:25:22,475 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-05T00:25:22,475 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-05T00:25:22,475 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.large.0-1733358322475,5,FailOnTimeoutGroup] 2024-12-05T00:25:22,475 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:25:22,476 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-05T00:25:22,476 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.small.0-1733358322475,5,FailOnTimeoutGroup] 2024-12-05T00:25:22,476 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:22,476 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-05T00:25:22,476 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:22,477 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
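The two entries above (FSTableDescriptors(156) and the HFileCleaner/chore lines) show the hbase:meta descriptor being written with families like 'info': ROW_INDEX_V1 block encoding, ROWCOL bloom filter, in-memory, 8 KB blocks, 3 versions. As a point of reference only, a minimal sketch of building a comparable family with the public HBase client API; this is not the internal code path FSTableDescriptors uses, and the table name "example:meta_like" is made up for the sketch.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
      public static TableDescriptor build() {
        // Mirrors the attributes logged for the 'info' family of hbase:meta:
        // ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks, 3 versions.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setMaxVersions(3)
            .build();
        // Hypothetical table name, used only for illustration.
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example", "meta_like"))
            .setColumnFamily(info)
            .build();
      }
    }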
2024-12-05T00:25:22,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42699 is added to blk_1073741831_1007 (size=1321) 2024-12-05T00:25:22,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741831_1007 (size=1321) 2024-12-05T00:25:22,490 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-05T00:25:22,490 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d 2024-12-05T00:25:22,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741832_1008 (size=32) 2024-12-05T00:25:22,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42699 is added to blk_1073741832_1008 (size=32) 2024-12-05T00:25:22,501 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:25:22,511 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T00:25:22,513 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T00:25:22,514 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:25:22,514 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:25:22,514 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T00:25:22,516 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T00:25:22,516 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:25:22,517 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:25:22,517 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T00:25:22,519 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T00:25:22,519 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:25:22,519 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:25:22,520 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T00:25:22,521 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T00:25:22,521 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:25:22,522 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:25:22,522 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T00:25:22,523 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/data/hbase/meta/1588230740 2024-12-05T00:25:22,524 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/data/hbase/meta/1588230740 2024-12-05T00:25:22,524 INFO [RS:0;2113c16e5528:42255 {}] regionserver.HRegionServer(746): ClusterId : 4dd59478-282c-44c0-b43a-32d02bd2e7f3 2024-12-05T00:25:22,524 DEBUG [RS:0;2113c16e5528:42255 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T00:25:22,526 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T00:25:22,526 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T00:25:22,526 DEBUG [RS:0;2113c16e5528:42255 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T00:25:22,526 DEBUG [RS:0;2113c16e5528:42255 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T00:25:22,527 DEBUG 
[PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-05T00:25:22,528 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T00:25:22,529 DEBUG [RS:0;2113c16e5528:42255 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T00:25:22,530 DEBUG [RS:0;2113c16e5528:42255 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cc8daf6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2113c16e5528/172.17.0.2:0 2024-12-05T00:25:22,532 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T00:25:22,533 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=778594, jitterRate=-0.009967714548110962}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-05T00:25:22,534 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733358322501Initializing all the Stores at 1733358322503 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358322503Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358322511 (+8 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358322511Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358322511Cleaning up temporary data from old regions at 1733358322526 (+15 ms)Region opened successfully at 1733358322534 (+8 ms) 2024-12-05T00:25:22,535 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T00:25:22,535 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T00:25:22,535 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T00:25:22,535 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T00:25:22,535 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T00:25:22,537 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T00:25:22,537 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733358322535Disabling compacts and flushes for region at 1733358322535Disabling writes for close at 1733358322535Writing region close event to WAL at 1733358322536 (+1 ms)Closed at 1733358322536 2024-12-05T00:25:22,539 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:25:22,539 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-05T00:25:22,539 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-05T00:25:22,543 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T00:25:22,545 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-05T00:25:22,547 DEBUG [RS:0;2113c16e5528:42255 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;2113c16e5528:42255 2024-12-05T00:25:22,547 INFO [RS:0;2113c16e5528:42255 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T00:25:22,547 INFO [RS:0;2113c16e5528:42255 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T00:25:22,547 DEBUG [RS:0;2113c16e5528:42255 {}] regionserver.HRegionServer(832): About to register with Master. 
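The HRegion(1114) "Opened 1588230740" entries in this run report desiredMaxFileSize=778594 with jitterRate=-0.009967714548110962 (and, further below, 798269 with jitterRate=0.015051737427711487). Back-solving both values gives a base size of 786432 bytes (768 KB), which appears to be this test's hbase.hregion.max.filesize, with the jitter applied additively and truncated to a long. A quick arithmetic check of that reading, under those assumptions:

    public class SplitJitterCheck {
      public static void main(String[] args) {
        long base = 786_432L; // assumed test value, back-solved from the two log lines
        double[] jitterRates = {-0.009967714548110962, 0.015051737427711487};
        for (double rate : jitterRates) {
          // additive jitter, truncated to a long
          long desired = base + (long) (base * rate);
          System.out.println("jitterRate=" + rate + " -> desiredMaxFileSize=" + desired);
          // prints 778594 and 798269, matching the two "Opened 1588230740" entries
        }
      }
    }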
2024-12-05T00:25:22,548 INFO [RS:0;2113c16e5528:42255 {}] regionserver.HRegionServer(2659): reportForDuty to master=2113c16e5528,44907,1733358321689 with port=42255, startcode=1733358321781 2024-12-05T00:25:22,548 DEBUG [RS:0;2113c16e5528:42255 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T00:25:22,556 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52365, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T00:25:22,557 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44907 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2113c16e5528,42255,1733358321781 2024-12-05T00:25:22,557 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44907 {}] master.ServerManager(517): Registering regionserver=2113c16e5528,42255,1733358321781 2024-12-05T00:25:22,561 DEBUG [RS:0;2113c16e5528:42255 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d 2024-12-05T00:25:22,561 DEBUG [RS:0;2113c16e5528:42255 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37323 2024-12-05T00:25:22,561 DEBUG [RS:0;2113c16e5528:42255 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T00:25:22,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T00:25:22,566 DEBUG [RS:0;2113c16e5528:42255 {}] zookeeper.ZKUtil(111): regionserver:42255-0x1018002ea890001, quorum=127.0.0.1:49244, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2113c16e5528,42255,1733358321781 2024-12-05T00:25:22,566 WARN [RS:0;2113c16e5528:42255 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T00:25:22,566 INFO [RS:0;2113c16e5528:42255 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T00:25:22,567 DEBUG [RS:0;2113c16e5528:42255 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/WALs/2113c16e5528,42255,1733358321781 2024-12-05T00:25:22,567 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2113c16e5528,42255,1733358321781] 2024-12-05T00:25:22,573 INFO [RS:0;2113c16e5528:42255 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T00:25:22,580 INFO [RS:0;2113c16e5528:42255 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T00:25:22,581 INFO [RS:0;2113c16e5528:42255 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T00:25:22,581 INFO [RS:0;2113c16e5528:42255 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
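The MemStoreFlusher(131) line above reports globalMemStoreLimit=880 M with globalMemStoreLimitLowMark=836 M. Those numbers are consistent with the default settings (hbase.regionserver.global.memstore.size = 0.4 of heap, hbase.regionserver.global.memstore.size.lower.limit = 0.95 of the upper limit) on a roughly 2200 MB heap; a small check under that assumption:

    public class MemStoreLimitCheck {
      public static void main(String[] args) {
        long heapMb  = 2200;                     // implied heap: 880 MB / 0.4 (assumption)
        long upperMb = (long) (heapMb * 0.4);    // 880 MB global memstore limit
        long lowerMb = (long) (upperMb * 0.95);  // 836 MB low-water mark
        System.out.println(upperMb + " M / " + lowerMb + " M");
      }
    }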
2024-12-05T00:25:22,581 INFO [RS:0;2113c16e5528:42255 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T00:25:22,583 INFO [RS:0;2113c16e5528:42255 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T00:25:22,583 INFO [RS:0;2113c16e5528:42255 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:22,583 DEBUG [RS:0;2113c16e5528:42255 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:22,583 DEBUG [RS:0;2113c16e5528:42255 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:22,583 DEBUG [RS:0;2113c16e5528:42255 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:22,583 DEBUG [RS:0;2113c16e5528:42255 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:22,583 DEBUG [RS:0;2113c16e5528:42255 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:22,584 DEBUG [RS:0;2113c16e5528:42255 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2113c16e5528:0, corePoolSize=2, maxPoolSize=2 2024-12-05T00:25:22,584 DEBUG [RS:0;2113c16e5528:42255 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:22,584 DEBUG [RS:0;2113c16e5528:42255 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:22,584 DEBUG [RS:0;2113c16e5528:42255 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:22,584 DEBUG [RS:0;2113c16e5528:42255 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:22,584 DEBUG [RS:0;2113c16e5528:42255 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:22,584 DEBUG [RS:0;2113c16e5528:42255 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:22,584 DEBUG [RS:0;2113c16e5528:42255 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:25:22,584 DEBUG [RS:0;2113c16e5528:42255 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:25:22,594 INFO [RS:0;2113c16e5528:42255 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
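The ChoreService(168) entries here and below all follow the same pattern: a named periodic task with a fixed period and time unit. A stand-in sketch of that pattern using plain java.util.concurrent (deliberately not HBase's ChoreService/ScheduledChore API), sized like the CompactionChecker chore (period=1000, unit=MILLISECONDS):

    // Not HBase code; a generic fixed-rate task illustrating the chore pattern.
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreLikeScheduler {
      public static void main(String[] args) {
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
        // Runs every second until the pool is shut down.
        pool.scheduleAtFixedRate(
            () -> System.out.println("checking stores for compaction work"),
            1000, 1000, TimeUnit.MILLISECONDS);
      }
    }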
2024-12-05T00:25:22,594 INFO [RS:0;2113c16e5528:42255 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:22,594 INFO [RS:0;2113c16e5528:42255 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:22,594 INFO [RS:0;2113c16e5528:42255 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:22,594 INFO [RS:0;2113c16e5528:42255 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:22,594 INFO [RS:0;2113c16e5528:42255 {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,42255,1733358321781-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T00:25:22,611 INFO [RS:0;2113c16e5528:42255 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T00:25:22,611 INFO [RS:0;2113c16e5528:42255 {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,42255,1733358321781-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:22,611 INFO [RS:0;2113c16e5528:42255 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:22,611 INFO [RS:0;2113c16e5528:42255 {}] regionserver.Replication(171): 2113c16e5528,42255,1733358321781 started 2024-12-05T00:25:22,628 INFO [RS:0;2113c16e5528:42255 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:22,629 INFO [RS:0;2113c16e5528:42255 {}] regionserver.HRegionServer(1482): Serving as 2113c16e5528,42255,1733358321781, RpcServer on 2113c16e5528/172.17.0.2:42255, sessionid=0x1018002ea890001 2024-12-05T00:25:22,629 DEBUG [RS:0;2113c16e5528:42255 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T00:25:22,629 DEBUG [RS:0;2113c16e5528:42255 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2113c16e5528,42255,1733358321781 2024-12-05T00:25:22,629 DEBUG [RS:0;2113c16e5528:42255 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2113c16e5528,42255,1733358321781' 2024-12-05T00:25:22,629 DEBUG [RS:0;2113c16e5528:42255 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T00:25:22,630 DEBUG [RS:0;2113c16e5528:42255 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T00:25:22,630 DEBUG [RS:0;2113c16e5528:42255 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T00:25:22,630 DEBUG [RS:0;2113c16e5528:42255 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T00:25:22,630 DEBUG [RS:0;2113c16e5528:42255 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2113c16e5528,42255,1733358321781 2024-12-05T00:25:22,630 DEBUG [RS:0;2113c16e5528:42255 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2113c16e5528,42255,1733358321781' 2024-12-05T00:25:22,630 DEBUG [RS:0;2113c16e5528:42255 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T00:25:22,631 DEBUG 
[RS:0;2113c16e5528:42255 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T00:25:22,631 DEBUG [RS:0;2113c16e5528:42255 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T00:25:22,631 INFO [RS:0;2113c16e5528:42255 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T00:25:22,631 INFO [RS:0;2113c16e5528:42255 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T00:25:22,695 WARN [2113c16e5528:44907 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-05T00:25:22,734 INFO [RS:0;2113c16e5528:42255 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2113c16e5528%2C42255%2C1733358321781, suffix=, logDir=hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/WALs/2113c16e5528,42255,1733358321781, archiveDir=hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/oldWALs, maxLogs=32 2024-12-05T00:25:22,736 INFO [RS:0;2113c16e5528:42255 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C42255%2C1733358321781.1733358322736 2024-12-05T00:25:22,746 INFO [RS:0;2113c16e5528:42255 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/WALs/2113c16e5528,42255,1733358321781/2113c16e5528%2C42255%2C1733358321781.1733358322736 2024-12-05T00:25:22,751 DEBUG [RS:0;2113c16e5528:42255 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40895:40895),(127.0.0.1/127.0.0.1:35665:35665)] 2024-12-05T00:25:22,945 DEBUG [2113c16e5528:44907 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-05T00:25:22,946 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=2113c16e5528,42255,1733358321781 2024-12-05T00:25:22,948 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2113c16e5528,42255,1733358321781, state=OPENING 2024-12-05T00:25:22,950 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-05T00:25:22,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:22,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42255-0x1018002ea890001, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:22,952 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T00:25:22,953 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:25:22,953 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:25:22,953 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=2113c16e5528,42255,1733358321781}] 2024-12-05T00:25:23,107 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T00:25:23,110 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52783, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T00:25:23,115 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-05T00:25:23,115 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T00:25:23,118 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2113c16e5528%2C42255%2C1733358321781.meta, suffix=.meta, logDir=hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/WALs/2113c16e5528,42255,1733358321781, archiveDir=hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/oldWALs, maxLogs=32 2024-12-05T00:25:23,119 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C42255%2C1733358321781.meta.1733358323119.meta 2024-12-05T00:25:23,130 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/WALs/2113c16e5528,42255,1733358321781/2113c16e5528%2C42255%2C1733358321781.meta.1733358323119.meta 2024-12-05T00:25:23,131 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40895:40895),(127.0.0.1/127.0.0.1:35665:35665)] 2024-12-05T00:25:23,131 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-05T00:25:23,132 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-05T00:25:23,132 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-05T00:25:23,132 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
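The assignment entries above show hbase:meta moving to OPENING on 2113c16e5528,42255,1733358321781 and MetaTableLocator publishing that location under /hbase/meta-region-server; later entries show clients fetching it back through the connection registry. For illustration, a minimal client-side sketch that resolves the hbase:meta location through the public API (it assumes a reachable cluster and an hbase-site.xml on the classpath):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationLookup {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          // Ask for the region covering the empty start key, i.e. hbase:meta,,1.
          HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
          // For the run above this would resolve to 2113c16e5528,42255,1733358321781.
          System.out.println("hbase:meta is on " + loc.getServerName());
        }
      }
    }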
2024-12-05T00:25:23,132 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-05T00:25:23,132 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:25:23,132 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-05T00:25:23,132 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-05T00:25:23,134 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T00:25:23,136 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T00:25:23,136 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:25:23,136 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:25:23,136 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T00:25:23,137 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T00:25:23,137 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:25:23,138 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:25:23,138 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T00:25:23,139 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T00:25:23,139 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:25:23,140 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:25:23,140 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T00:25:23,141 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T00:25:23,141 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:25:23,141 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
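The CompactionConfiguration(183) lines above show ratio 1.200000 with minFilesToCompact:3 and maxFilesToCompact:10, which feed ExploringCompactionPolicy. A simplified restatement of its "files in ratio" rule (not the actual HBase class): every file in a candidate selection must be no larger than ratio times the sum of the other files, otherwise one large file dominates and the selection is rejected.

    import java.util.List;

    public class FilesInRatioSketch {
      // Simplified version of the ratio check used when picking compaction candidates.
      static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
          if (size > (total - size) * ratio) {
            return false; // one file dominates the selection
          }
        }
        return true;
      }

      public static void main(String[] args) {
        System.out.println(filesInRatio(List.of(100L, 110L, 120L), 1.2)); // true
        System.out.println(filesInRatio(List.of(100L, 100L, 500L), 1.2)); // false: 500 > 1.2 * 200
      }
    }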
2024-12-05T00:25:23,142 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T00:25:23,142 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/data/hbase/meta/1588230740 2024-12-05T00:25:23,144 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/data/hbase/meta/1588230740 2024-12-05T00:25:23,146 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T00:25:23,146 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T00:25:23,146 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-05T00:25:23,148 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T00:25:23,150 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=798269, jitterRate=0.015051737427711487}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-05T00:25:23,150 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-05T00:25:23,152 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733358323133Writing region info on filesystem at 1733358323133Initializing all the Stores at 1733358323134 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358323134Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358323134Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358323134Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358323134Cleaning up temporary data from old regions at 1733358323146 (+12 ms)Running coprocessor post-open hooks at 1733358323150 (+4 ms)Region opened successfully at 1733358323152 (+2 ms) 2024-12-05T00:25:23,153 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733358323106 2024-12-05T00:25:23,157 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-05T00:25:23,157 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-05T00:25:23,159 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,42255,1733358321781 2024-12-05T00:25:23,160 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2113c16e5528,42255,1733358321781, state=OPEN 2024-12-05T00:25:23,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T00:25:23,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42255-0x1018002ea890001, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T00:25:23,170 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=2113c16e5528,42255,1733358321781 2024-12-05T00:25:23,170 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:25:23,170 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:25:23,174 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-05T00:25:23,174 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=2113c16e5528,42255,1733358321781 in 217 msec 2024-12-05T00:25:23,177 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-05T00:25:23,177 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 635 msec 2024-12-05T00:25:23,178 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:25:23,178 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-05T00:25:23,180 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T00:25:23,180 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,42255,1733358321781, seqNum=-1] 2024-12-05T00:25:23,181 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T00:25:23,183 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33487, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T00:25:23,191 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 728 msec 2024-12-05T00:25:23,191 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733358323191, completionTime=-1 2024-12-05T00:25:23,191 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-05T00:25:23,192 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-05T00:25:23,194 INFO [master/2113c16e5528:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-05T00:25:23,194 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733358383194 2024-12-05T00:25:23,194 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733358443194 2024-12-05T00:25:23,194 INFO [master/2113c16e5528:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-05T00:25:23,194 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,44907,1733358321689-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:23,194 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,44907,1733358321689-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:23,195 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,44907,1733358321689-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:23,195 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-2113c16e5528:44907, period=300000, unit=MILLISECONDS is enabled. 
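Per the InitMetaProcedure(114) entry above, the last step of meta initialization creates the 'default' and 'hbase' namespaces. A small sketch of confirming that through the public Admin API once the cluster is up (connection setup assumed as in the earlier lookup sketch):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListNamespaces {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName()); // expected after startup: "default" and "hbase"
          }
        }
      }
    }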
2024-12-05T00:25:23,195 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:23,195 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:23,197 DEBUG [master/2113c16e5528:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-05T00:25:23,201 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.341sec 2024-12-05T00:25:23,201 INFO [master/2113c16e5528:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-05T00:25:23,201 INFO [master/2113c16e5528:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-05T00:25:23,201 INFO [master/2113c16e5528:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-05T00:25:23,201 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-05T00:25:23,201 INFO [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-05T00:25:23,201 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,44907,1733358321689-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T00:25:23,201 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,44907,1733358321689-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-05T00:25:23,204 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-05T00:25:23,204 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-05T00:25:23,204 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,44907,1733358321689-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
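[editor's note] The chore registrations above (ClusterStatusChore, BalancerChore, RegionNormalizerChore, CatalogJanitor, HbckChore, MobFileCleanerChore, RollingUpgradeChore, OldWALsDirSizeChore and friends) all go through HBase's ChoreService/ScheduledChore machinery, which emits the "Chore ScheduledChore name=..., period=..., unit=... is enabled" lines. Below is a minimal sketch of that pattern, assuming the internal org.apache.hadoop.hbase.ChoreService and ScheduledChore APIs roughly as they exist today; the chore name, period and the trivial Stoppable are illustrative, not taken from this test.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreExample {
  public static void main(String[] args) throws InterruptedException {
    // Trivial stopper; inside the master the HMaster itself plays this role.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped = false;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    // Scheduling a chore is what produces the "... is enabled" log line above.
    ChoreService choreService = new ChoreService("example");
    ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 60_000) {
      @Override protected void chore() {
        // Periodic work goes here (balancing, janitor scans, ...).
        System.out.println("chore tick");
      }
    };
    choreService.scheduleChore(chore);

    Thread.sleep(1_000);
    stopper.stop("demo done");
    choreService.shutdown();
  }
}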
2024-12-05T00:25:23,224 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ffb657f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:25:23,225 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,44907,-1 for getting cluster id 2024-12-05T00:25:23,225 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T00:25:23,228 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4dd59478-282c-44c0-b43a-32d02bd2e7f3' 2024-12-05T00:25:23,228 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T00:25:23,229 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4dd59478-282c-44c0-b43a-32d02bd2e7f3" 2024-12-05T00:25:23,229 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7047af98, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:25:23,229 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,44907,-1] 2024-12-05T00:25:23,229 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T00:25:23,230 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:25:23,231 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53788, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T00:25:23,233 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e5d9cb3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:25:23,233 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T00:25:23,235 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,42255,1733358321781, seqNum=-1] 2024-12-05T00:25:23,236 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T00:25:23,238 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34348, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T00:25:23,240 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=2113c16e5528,44907,1733358321689 2024-12-05T00:25:23,241 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:25:23,245 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-05T00:25:23,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-05T00:25:23,245 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-05T00:25:23,245 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:25:23,245 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:25:23,246 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:25:23,246 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T00:25:23,246 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-05T00:25:23,246 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1644424511, stopped=false 2024-12-05T00:25:23,246 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=2113c16e5528,44907,1733358321689 2024-12-05T00:25:23,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T00:25:23,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42255-0x1018002ea890001, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T00:25:23,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:23,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42255-0x1018002ea890001, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:23,248 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T00:25:23,248 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-05T00:25:23,248 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:25:23,248 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:25:23,249 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2113c16e5528,42255,1733358321781' ***** 2024-12-05T00:25:23,249 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42255-0x1018002ea890001, quorum=127.0.0.1:49244, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:25:23,249 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T00:25:23,249 INFO [RS:0;2113c16e5528:42255 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T00:25:23,249 INFO [RS:0;2113c16e5528:42255 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T00:25:23,249 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T00:25:23,249 INFO [RS:0;2113c16e5528:42255 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T00:25:23,249 INFO [RS:0;2113c16e5528:42255 {}] regionserver.HRegionServer(959): stopping server 2113c16e5528,42255,1733358321781 2024-12-05T00:25:23,249 INFO [RS:0;2113c16e5528:42255 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T00:25:23,249 INFO [RS:0;2113c16e5528:42255 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;2113c16e5528:42255. 2024-12-05T00:25:23,250 DEBUG [RS:0;2113c16e5528:42255 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:25:23,250 DEBUG [RS:0;2113c16e5528:42255 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:25:23,250 INFO [RS:0;2113c16e5528:42255 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T00:25:23,250 INFO [RS:0;2113c16e5528:42255 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T00:25:23,250 INFO [RS:0;2113c16e5528:42255 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
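[editor's note] The call stacks above all originate from the test's tearDown: AbstractTestLogRolling.tearDown invokes HBaseTestingUtil.shutdownMiniCluster, which closes the async client connection, asks the HMaster to shut down, and then stops the region server, producing the sequence recorded here. A minimal sketch of that tearDown shape follows; the class name, the TEST_UTIL field and the no-argument startMiniCluster() call are assumptions for illustration (the real test base class keeps more state), only the shutdownMiniCluster() call is taken directly from the stack trace.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class ExampleMiniClusterTest {
  // Shared testing utility, as HBase test base classes typically hold.
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    TEST_UTIL.startMiniCluster();  // bring up ZK, MiniDFS and one master + one RS
  }

  @After
  public void tearDown() throws Exception {
    // Triggers the AsyncConnectionImpl close, HMaster.shutdown and
    // HRegionServer stop sequence seen in the log above.
    TEST_UTIL.shutdownMiniCluster();
  }
}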
2024-12-05T00:25:23,250 INFO [RS:0;2113c16e5528:42255 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-05T00:25:23,250 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:25:23,250 INFO [RS:0;2113c16e5528:42255 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-05T00:25:23,250 DEBUG [RS:0;2113c16e5528:42255 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-05T00:25:23,250 DEBUG [RS:0;2113c16e5528:42255 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-05T00:25:23,250 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T00:25:23,250 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T00:25:23,251 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T00:25:23,251 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T00:25:23,251 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T00:25:23,251 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-05T00:25:23,270 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/data/hbase/meta/1588230740/.tmp/ns/e4694f41ada84bbcb9491016a0e51f7e is 43, key is default/ns:d/1733358323184/Put/seqid=0 2024-12-05T00:25:23,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42699 is added to blk_1073741835_1011 (size=5153) 2024-12-05T00:25:23,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741835_1011 (size=5153) 2024-12-05T00:25:23,281 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/data/hbase/meta/1588230740/.tmp/ns/e4694f41ada84bbcb9491016a0e51f7e 2024-12-05T00:25:23,289 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/data/hbase/meta/1588230740/.tmp/ns/e4694f41ada84bbcb9491016a0e51f7e as hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/data/hbase/meta/1588230740/ns/e4694f41ada84bbcb9491016a0e51f7e 2024-12-05T00:25:23,296 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/data/hbase/meta/1588230740/ns/e4694f41ada84bbcb9491016a0e51f7e, entries=2, sequenceid=6, filesize=5.0 K 2024-12-05T00:25:23,298 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 47ms, sequenceid=6, compaction requested=false 2024-12-05T00:25:23,298 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-05T00:25:23,304 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T00:25:23,305 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T00:25:23,305 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T00:25:23,305 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733358323250Running coprocessor pre-close hooks at 1733358323250Disabling compacts and flushes for region at 1733358323250Disabling writes for close at 1733358323251 (+1 ms)Obtaining lock to block concurrent updates at 1733358323251Preparing flush snapshotting stores in 1588230740 at 1733358323251Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733358323251Flushing stores of hbase:meta,,1.1588230740 at 1733358323252 (+1 ms)Flushing 1588230740/ns: creating writer at 1733358323252Flushing 1588230740/ns: appending metadata at 1733358323269 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1733358323269Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@27da4f93: reopening flushed file at 1733358323288 (+19 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 47ms, sequenceid=6, compaction requested=false at 1733358323298 (+10 ms)Writing region close event to WAL at 1733358323300 (+2 ms)Running coprocessor post-close hooks at 1733358323305 (+5 ms)Closed at 1733358323305 2024-12-05T00:25:23,305 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-05T00:25:23,451 INFO [RS:0;2113c16e5528:42255 {}] regionserver.HRegionServer(976): stopping server 2113c16e5528,42255,1733358321781; all regions closed. 
2024-12-05T00:25:23,451 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:23,451 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:23,451 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:23,452 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:23,452 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:23,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42699 is added to blk_1073741834_1010 (size=1152) 2024-12-05T00:25:23,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741834_1010 (size=1152) 2024-12-05T00:25:23,457 DEBUG [RS:0;2113c16e5528:42255 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/oldWALs 2024-12-05T00:25:23,457 INFO [RS:0;2113c16e5528:42255 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2113c16e5528%2C42255%2C1733358321781.meta:.meta(num 1733358323119) 2024-12-05T00:25:23,458 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:23,458 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:23,458 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:23,458 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:23,458 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:23,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42699 is added to blk_1073741833_1009 (size=93) 2024-12-05T00:25:23,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741833_1009 (size=93) 2024-12-05T00:25:23,463 DEBUG [RS:0;2113c16e5528:42255 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/oldWALs 2024-12-05T00:25:23,463 INFO [RS:0;2113c16e5528:42255 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2113c16e5528%2C42255%2C1733358321781:(num 1733358322736) 2024-12-05T00:25:23,463 DEBUG [RS:0;2113c16e5528:42255 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:25:23,463 INFO [RS:0;2113c16e5528:42255 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T00:25:23,463 INFO [RS:0;2113c16e5528:42255 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T00:25:23,464 INFO [RS:0;2113c16e5528:42255 {}] hbase.ChoreService(370): Chore service for: regionserver/2113c16e5528:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T00:25:23,464 INFO [RS:0;2113c16e5528:42255 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T00:25:23,464 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-05T00:25:23,464 INFO [RS:0;2113c16e5528:42255 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42255 2024-12-05T00:25:23,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T00:25:23,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42255-0x1018002ea890001, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2113c16e5528,42255,1733358321781 2024-12-05T00:25:23,466 INFO [RS:0;2113c16e5528:42255 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T00:25:23,467 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2113c16e5528,42255,1733358321781] 2024-12-05T00:25:23,470 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2113c16e5528,42255,1733358321781 already deleted, retry=false 2024-12-05T00:25:23,470 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2113c16e5528,42255,1733358321781 expired; onlineServers=0 2024-12-05T00:25:23,470 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '2113c16e5528,44907,1733358321689' ***** 2024-12-05T00:25:23,470 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-05T00:25:23,470 INFO [M:0;2113c16e5528:44907 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T00:25:23,470 INFO [M:0;2113c16e5528:44907 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T00:25:23,470 DEBUG [M:0;2113c16e5528:44907 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-05T00:25:23,470 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
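[editor's note] The expiration handling above (NodeDeleted on /hbase/rs/2113c16e5528,42255,... followed by "RegionServer ephemeral node deleted, processing expiration") is the standard ZooKeeper ephemeral-node membership pattern: each region server holds an ephemeral znode under /hbase/rs, and the master watches that subtree, so the node vanishing marks the server dead. The sketch below shows the bare pattern with the plain ZooKeeper client API; the connect string, paths and timeout are made up for illustration, and HBase itself goes through its ZKWatcher wrapper rather than raw ZooKeeper calls.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralMembershipExample {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();

    // Persistent parent for the membership list (fails if it already exists),
    // then an ephemeral child that is removed automatically when this session closes.
    zk.create("/demo", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    zk.create("/demo/rs", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    zk.create("/demo/rs/server-1", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE,
        CreateMode.EPHEMERAL);

    // "Master" side: watch the parent; in HBase this watcher lives in the
    // master's own session, so it sees NodeChildrenChanged/NodeDeleted when a
    // region server's session ends, as in the log above.
    zk.getChildren("/demo/rs", event ->
        System.out.println("membership event: " + event.getType()));

    // Closing the session removes the ephemeral node; watchers registered by
    // other sessions (the master, in HBase) are the ones that observe it.
    zk.close();
  }
}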
2024-12-05T00:25:23,470 DEBUG [M:0;2113c16e5528:44907 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-05T00:25:23,470 DEBUG [master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.large.0-1733358322475 {}] cleaner.HFileCleaner(306): Exit Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.large.0-1733358322475,5,FailOnTimeoutGroup] 2024-12-05T00:25:23,470 DEBUG [master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.small.0-1733358322475 {}] cleaner.HFileCleaner(306): Exit Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.small.0-1733358322475,5,FailOnTimeoutGroup] 2024-12-05T00:25:23,470 INFO [M:0;2113c16e5528:44907 {}] hbase.ChoreService(370): Chore service for: master/2113c16e5528:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-05T00:25:23,471 INFO [M:0;2113c16e5528:44907 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T00:25:23,471 DEBUG [M:0;2113c16e5528:44907 {}] master.HMaster(1795): Stopping service threads 2024-12-05T00:25:23,471 INFO [M:0;2113c16e5528:44907 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-05T00:25:23,471 INFO [M:0;2113c16e5528:44907 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T00:25:23,471 INFO [M:0;2113c16e5528:44907 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-05T00:25:23,471 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-05T00:25:23,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-05T00:25:23,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:23,473 DEBUG [M:0;2113c16e5528:44907 {}] zookeeper.ZKUtil(347): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-05T00:25:23,473 WARN [M:0;2113c16e5528:44907 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-05T00:25:23,473 INFO [M:0;2113c16e5528:44907 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/.lastflushedseqids 2024-12-05T00:25:23,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741836_1012 (size=108) 2024-12-05T00:25:23,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42699 is added to blk_1073741836_1012 (size=108) 2024-12-05T00:25:23,481 INFO [M:0;2113c16e5528:44907 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-05T00:25:23,481 INFO [M:0;2113c16e5528:44907 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-05T00:25:23,481 DEBUG [M:0;2113c16e5528:44907 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T00:25:23,481 INFO [M:0;2113c16e5528:44907 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:25:23,481 DEBUG [M:0;2113c16e5528:44907 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:25:23,482 DEBUG [M:0;2113c16e5528:44907 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T00:25:23,482 DEBUG [M:0;2113c16e5528:44907 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:25:23,482 INFO [M:0;2113c16e5528:44907 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-05T00:25:23,504 DEBUG [M:0;2113c16e5528:44907 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/337add5355ea4c4cae52cb029058028d is 82, key is hbase:meta,,1/info:regioninfo/1733358323158/Put/seqid=0 2024-12-05T00:25:23,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42699 is added to blk_1073741837_1013 (size=5672) 2024-12-05T00:25:23,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741837_1013 (size=5672) 2024-12-05T00:25:23,510 INFO [M:0;2113c16e5528:44907 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/337add5355ea4c4cae52cb029058028d 2024-12-05T00:25:23,534 DEBUG [M:0;2113c16e5528:44907 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8cab37d3968f4a69858601f8fd2270fa is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733358323190/Put/seqid=0 2024-12-05T00:25:23,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42699 is added to blk_1073741838_1014 (size=5275) 2024-12-05T00:25:23,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741838_1014 (size=5275) 2024-12-05T00:25:23,543 INFO [M:0;2113c16e5528:44907 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8cab37d3968f4a69858601f8fd2270fa 2024-12-05T00:25:23,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42255-0x1018002ea890001, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:25:23,568 INFO [RS:0;2113c16e5528:42255 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T00:25:23,568 INFO [RS:0;2113c16e5528:42255 {}] regionserver.HRegionServer(1031): Exiting; 
stopping=2113c16e5528,42255,1733358321781; zookeeper connection closed. 2024-12-05T00:25:23,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42255-0x1018002ea890001, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:25:23,568 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@9b6063e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@9b6063e 2024-12-05T00:25:23,569 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-05T00:25:23,580 DEBUG [M:0;2113c16e5528:44907 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/367554e9f0924cd8a72a1c90de265191 is 69, key is 2113c16e5528,42255,1733358321781/rs:state/1733358322557/Put/seqid=0 2024-12-05T00:25:23,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42699 is added to blk_1073741839_1015 (size=5156) 2024-12-05T00:25:23,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741839_1015 (size=5156) 2024-12-05T00:25:23,592 INFO [M:0;2113c16e5528:44907 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/367554e9f0924cd8a72a1c90de265191 2024-12-05T00:25:23,623 DEBUG [M:0;2113c16e5528:44907 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2ddbaa8932d949d19fdbc7f54e6b9895 is 52, key is load_balancer_on/state:d/1733358323243/Put/seqid=0 2024-12-05T00:25:23,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741840_1016 (size=5056) 2024-12-05T00:25:23,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42699 is added to blk_1073741840_1016 (size=5056) 2024-12-05T00:25:23,630 INFO [M:0;2113c16e5528:44907 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2ddbaa8932d949d19fdbc7f54e6b9895 2024-12-05T00:25:23,637 DEBUG [M:0;2113c16e5528:44907 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/337add5355ea4c4cae52cb029058028d as hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/337add5355ea4c4cae52cb029058028d 2024-12-05T00:25:23,644 INFO [M:0;2113c16e5528:44907 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/337add5355ea4c4cae52cb029058028d, entries=8, sequenceid=29, filesize=5.5 K 2024-12-05T00:25:23,645 DEBUG [M:0;2113c16e5528:44907 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8cab37d3968f4a69858601f8fd2270fa as hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8cab37d3968f4a69858601f8fd2270fa 2024-12-05T00:25:23,656 INFO [M:0;2113c16e5528:44907 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8cab37d3968f4a69858601f8fd2270fa, entries=3, sequenceid=29, filesize=5.2 K 2024-12-05T00:25:23,658 DEBUG [M:0;2113c16e5528:44907 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/367554e9f0924cd8a72a1c90de265191 as hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/367554e9f0924cd8a72a1c90de265191 2024-12-05T00:25:23,666 INFO [M:0;2113c16e5528:44907 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/367554e9f0924cd8a72a1c90de265191, entries=1, sequenceid=29, filesize=5.0 K 2024-12-05T00:25:23,667 DEBUG [M:0;2113c16e5528:44907 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2ddbaa8932d949d19fdbc7f54e6b9895 as hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2ddbaa8932d949d19fdbc7f54e6b9895 2024-12-05T00:25:23,674 INFO [M:0;2113c16e5528:44907 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37323/user/jenkins/test-data/2ffb1745-b94d-756a-a14f-781704a26d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2ddbaa8932d949d19fdbc7f54e6b9895, entries=1, sequenceid=29, filesize=4.9 K 2024-12-05T00:25:23,676 INFO [M:0;2113c16e5528:44907 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 194ms, sequenceid=29, compaction requested=false 2024-12-05T00:25:23,678 INFO [M:0;2113c16e5528:44907 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-05T00:25:23,678 DEBUG [M:0;2113c16e5528:44907 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733358323481Disabling compacts and flushes for region at 1733358323481Disabling writes for close at 1733358323482 (+1 ms)Obtaining lock to block concurrent updates at 1733358323482Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733358323482Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733358323483 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733358323483Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733358323483Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733358323503 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733358323503Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733358323516 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733358323534 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733358323534Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733358323550 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733358323579 (+29 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733358323580 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733358323599 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733358323622 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733358323622Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a206b8: reopening flushed file at 1733358323636 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3cf094e: reopening flushed file at 1733358323644 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@11ecd233: reopening flushed file at 1733358323656 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@689545ae: reopening flushed file at 1733358323666 (+10 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 194ms, sequenceid=29, compaction requested=false at 1733358323676 (+10 ms)Writing region close event to WAL at 1733358323678 (+2 ms)Closed at 1733358323678 2024-12-05T00:25:23,678 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:23,679 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:23,679 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:23,679 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:23,679 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:23,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741830_1006 (size=10311) 2024-12-05T00:25:23,684 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-05T00:25:23,684 INFO [M:0;2113c16e5528:44907 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-05T00:25:23,684 INFO [M:0;2113c16e5528:44907 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44907 2024-12-05T00:25:23,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42699 is added to blk_1073741830_1006 (size=10311) 2024-12-05T00:25:23,685 INFO [M:0;2113c16e5528:44907 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T00:25:23,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:25:23,787 INFO [M:0;2113c16e5528:44907 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T00:25:23,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44907-0x1018002ea890000, quorum=127.0.0.1:49244, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:25:23,792 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@824b6ae{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:25:23,792 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5d7e5c06{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:25:23,792 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:25:23,792 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@376d199b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:25:23,792 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b44e274{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/hadoop.log.dir/,STOPPED} 2024-12-05T00:25:23,794 WARN [BP-1998844685-172.17.0.2-1733358320704 heartbeating to localhost/127.0.0.1:37323 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T00:25:23,794 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T00:25:23,794 WARN [BP-1998844685-172.17.0.2-1733358320704 heartbeating to localhost/127.0.0.1:37323 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1998844685-172.17.0.2-1733358320704 (Datanode Uuid e529cebd-dfdc-4ca8-b4bc-a0cc83f5208b) service to localhost/127.0.0.1:37323 2024-12-05T00:25:23,794 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T00:25:23,795 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/cluster_f7f2c3ab-9744-9fc0-c1d0-7d1481f05e7a/data/data3/current/BP-1998844685-172.17.0.2-1733358320704 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:25:23,795 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/cluster_f7f2c3ab-9744-9fc0-c1d0-7d1481f05e7a/data/data4/current/BP-1998844685-172.17.0.2-1733358320704 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:25:23,795 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T00:25:23,801 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5d4bdc00{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:25:23,801 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@687b21ce{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:25:23,801 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:25:23,802 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@276f8783{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:25:23,802 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4edee9ab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/hadoop.log.dir/,STOPPED} 2024-12-05T00:25:23,803 WARN [BP-1998844685-172.17.0.2-1733358320704 heartbeating to localhost/127.0.0.1:37323 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T00:25:23,803 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T00:25:23,803 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T00:25:23,803 WARN [BP-1998844685-172.17.0.2-1733358320704 heartbeating to localhost/127.0.0.1:37323 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1998844685-172.17.0.2-1733358320704 (Datanode Uuid dcec96bf-fe24-4f43-9d8f-8fdd8e36515b) service to localhost/127.0.0.1:37323 2024-12-05T00:25:23,804 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/cluster_f7f2c3ab-9744-9fc0-c1d0-7d1481f05e7a/data/data1/current/BP-1998844685-172.17.0.2-1733358320704 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:25:23,804 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/cluster_f7f2c3ab-9744-9fc0-c1d0-7d1481f05e7a/data/data2/current/BP-1998844685-172.17.0.2-1733358320704 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:25:23,805 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T00:25:23,811 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@511dc70f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-05T00:25:23,811 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3e469283{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:25:23,811 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:25:23,811 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45e3157d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:25:23,812 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3197ca45{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/hadoop.log.dir/,STOPPED} 2024-12-05T00:25:23,819 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-05T00:25:23,840 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-05T00:25:23,841 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-05T00:25:23,841 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/hadoop.log.dir so I do NOT create it in target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc 2024-12-05T00:25:23,841 INFO 
[Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/79308d4f-cab8-23a5-7240-d751a6ba9c86/hadoop.tmp.dir so I do NOT create it in target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc 2024-12-05T00:25:23,841 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a, deleteOnExit=true 2024-12-05T00:25:23,841 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-05T00:25:23,841 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/test.cache.data in system properties and HBase conf 2024-12-05T00:25:23,841 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/hadoop.tmp.dir in system properties and HBase conf 2024-12-05T00:25:23,841 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/hadoop.log.dir in system properties and HBase conf 2024-12-05T00:25:23,841 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-05T00:25:23,841 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-05T00:25:23,841 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-05T00:25:23,842 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-05T00:25:23,842 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-05T00:25:23,842 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-05T00:25:23,842 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-05T00:25:23,842 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T00:25:23,842 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-05T00:25:23,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-05T00:25:23,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T00:25:23,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T00:25:23,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-05T00:25:23,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/nfs.dump.dir in system properties and HBase conf 2024-12-05T00:25:23,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/java.io.tmpdir in system properties and HBase conf 2024-12-05T00:25:23,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T00:25:23,844 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-05T00:25:23,844 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-05T00:25:23,858 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-05T00:25:23,947 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:25:23,954 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:25:23,961 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:25:23,961 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:25:23,961 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T00:25:23,962 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:25:23,963 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a488aac{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:25:23,963 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ed3a961{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:25:24,083 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7cd2a640{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/java.io.tmpdir/jetty-localhost-41415-hadoop-hdfs-3_4_1-tests_jar-_-any-16020593861074340906/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-05T00:25:24,084 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@64b7b556{HTTP/1.1, (http/1.1)}{localhost:41415} 2024-12-05T00:25:24,084 INFO [Time-limited test {}] server.Server(415): Started @104711ms 2024-12-05T00:25:24,098 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-05T00:25:24,183 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:25:24,187 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:25:24,188 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:25:24,188 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:25:24,188 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-05T00:25:24,188 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@257cf4bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:25:24,189 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30e7c448{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:25:24,316 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@ab5393f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/java.io.tmpdir/jetty-localhost-46783-hadoop-hdfs-3_4_1-tests_jar-_-any-9813530624596117601/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:25:24,317 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@47f82e76{HTTP/1.1, (http/1.1)}{localhost:46783} 2024-12-05T00:25:24,317 INFO [Time-limited test {}] server.Server(415): Started @104944ms 2024-12-05T00:25:24,319 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T00:25:24,360 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:25:24,367 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:25:24,372 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:25:24,372 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:25:24,372 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-05T00:25:24,373 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69bbaec1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:25:24,374 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7720beab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:25:24,433 WARN [Thread-656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data1/current/BP-1914787219-172.17.0.2-1733358323877/current, will proceed with Du for space computation calculation, 2024-12-05T00:25:24,434 WARN [Thread-657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data2/current/BP-1914787219-172.17.0.2-1733358323877/current, will proceed with Du for space computation calculation, 2024-12-05T00:25:24,470 WARN [Thread-635 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-05T00:25:24,473 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeed1148f0364acfb with lease ID 0x6f798abb99ec6dd2: Processing first storage report for DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8 from datanode DatanodeRegistration(127.0.0.1:35205, datanodeUuid=515aad1c-3331-473f-a1b0-e49d806ffab8, infoPort=34567, infoSecurePort=0, ipcPort=41801, storageInfo=lv=-57;cid=testClusterID;nsid=355484051;c=1733358323877) 2024-12-05T00:25:24,474 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeed1148f0364acfb with lease ID 0x6f798abb99ec6dd2: from storage DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8 node DatanodeRegistration(127.0.0.1:35205, datanodeUuid=515aad1c-3331-473f-a1b0-e49d806ffab8, infoPort=34567, infoSecurePort=0, ipcPort=41801, storageInfo=lv=-57;cid=testClusterID;nsid=355484051;c=1733358323877), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-05T00:25:24,474 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeed1148f0364acfb with lease ID 0x6f798abb99ec6dd2: Processing first storage report for DS-a333f0de-900f-4be8-9b1c-8ab10a5f00dc from datanode DatanodeRegistration(127.0.0.1:35205, datanodeUuid=515aad1c-3331-473f-a1b0-e49d806ffab8, infoPort=34567, infoSecurePort=0, ipcPort=41801, storageInfo=lv=-57;cid=testClusterID;nsid=355484051;c=1733358323877) 2024-12-05T00:25:24,474 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeed1148f0364acfb with lease ID 0x6f798abb99ec6dd2: from storage DS-a333f0de-900f-4be8-9b1c-8ab10a5f00dc node DatanodeRegistration(127.0.0.1:35205, datanodeUuid=515aad1c-3331-473f-a1b0-e49d806ffab8, infoPort=34567, infoSecurePort=0, ipcPort=41801, storageInfo=lv=-57;cid=testClusterID;nsid=355484051;c=1733358323877), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:25:24,516 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@21d5e4af{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/java.io.tmpdir/jetty-localhost-37275-hadoop-hdfs-3_4_1-tests_jar-_-any-12305934237056431067/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:25:24,517 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@145c0180{HTTP/1.1, (http/1.1)}{localhost:37275} 2024-12-05T00:25:24,517 INFO [Time-limited test {}] server.Server(415): Started @105144ms 2024-12-05T00:25:24,518 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
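
The lines above show HBaseTestingUtil tearing down one mini cluster and bringing up another (ZooKeeper, a NameNode, two DataNodes with their Jetty web UIs) from a StartMiniClusterOption with numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1. A minimal sketch of how a test typically drives this is below; the class names HBaseTestingUtil and StartMiniClusterOption are taken from the log itself, but the builder and lifecycle method names are assumptions based on the HBase 2.x HBaseTestingUtility API, not code recovered from this run.

// Hypothetical test-harness sketch, not the code that produced this log.
// Builder/method names are assumptions modeled on the HBase 2.x testing API.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)         // matches numMasters=1 in the logged option
        .numRegionServers(1)   // matches numRegionServers=1
        .numDataNodes(2)       // matches numDataNodes=2
        .numZkServers(1)       // matches numZkServers=1
        .build();
    util.startMiniCluster(option);    // brings up ZK, HDFS and HBase, as logged above
    try {
      // ... exercise the cluster through util.getConnection() ...
    } finally {
      util.shutdownMiniCluster();     // produces the "Minicluster is down" line seen earlier
    }
  }
}
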
2024-12-05T00:25:24,595 INFO [regionserver/2113c16e5528:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T00:25:24,636 WARN [Thread-682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data3/current/BP-1914787219-172.17.0.2-1733358323877/current, will proceed with Du for space computation calculation, 2024-12-05T00:25:24,636 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data4/current/BP-1914787219-172.17.0.2-1733358323877/current, will proceed with Du for space computation calculation, 2024-12-05T00:25:24,668 WARN [Thread-671 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-05T00:25:24,672 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x16573a8c5a2845e0 with lease ID 0x6f798abb99ec6dd3: Processing first storage report for DS-32e7a19b-9b43-45a3-b613-bb1258e4827b from datanode DatanodeRegistration(127.0.0.1:34943, datanodeUuid=62e5d8db-0dd5-4ee1-910d-f64b93970567, infoPort=36003, infoSecurePort=0, ipcPort=35539, storageInfo=lv=-57;cid=testClusterID;nsid=355484051;c=1733358323877) 2024-12-05T00:25:24,672 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x16573a8c5a2845e0 with lease ID 0x6f798abb99ec6dd3: from storage DS-32e7a19b-9b43-45a3-b613-bb1258e4827b node DatanodeRegistration(127.0.0.1:34943, datanodeUuid=62e5d8db-0dd5-4ee1-910d-f64b93970567, infoPort=36003, infoSecurePort=0, ipcPort=35539, storageInfo=lv=-57;cid=testClusterID;nsid=355484051;c=1733358323877), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-05T00:25:24,672 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x16573a8c5a2845e0 with lease ID 0x6f798abb99ec6dd3: Processing first storage report for DS-848595dd-f9d6-4acc-afbc-5291a9af1401 from datanode DatanodeRegistration(127.0.0.1:34943, datanodeUuid=62e5d8db-0dd5-4ee1-910d-f64b93970567, infoPort=36003, infoSecurePort=0, ipcPort=35539, storageInfo=lv=-57;cid=testClusterID;nsid=355484051;c=1733358323877) 2024-12-05T00:25:24,672 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x16573a8c5a2845e0 with lease ID 0x6f798abb99ec6dd3: from storage DS-848595dd-f9d6-4acc-afbc-5291a9af1401 node DatanodeRegistration(127.0.0.1:34943, datanodeUuid=62e5d8db-0dd5-4ee1-910d-f64b93970567, infoPort=36003, infoSecurePort=0, ipcPort=35539, storageInfo=lv=-57;cid=testClusterID;nsid=355484051;c=1733358323877), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:25:24,744 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:25:24,753 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:25:24,769 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc 2024-12-05T00:25:24,789 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/zookeeper_0, clientPort=54477, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-05T00:25:24,792 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54477 2024-12-05T00:25:24,792 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:25:24,795 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:25:24,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741825_1001 (size=7) 2024-12-05T00:25:24,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35205 is added to blk_1073741825_1001 (size=7) 2024-12-05T00:25:24,821 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8 with version=8 2024-12-05T00:25:24,821 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/hbase-staging 2024-12-05T00:25:24,824 INFO [Time-limited test {}] client.ConnectionUtils(128): master/2113c16e5528:0 server-side Connection retries=45 2024-12-05T00:25:24,824 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:25:24,824 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T00:25:24,824 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T00:25:24,824 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, 
maxQueueLength=30, handlerCount=3 2024-12-05T00:25:24,824 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T00:25:24,825 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-05T00:25:24,825 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T00:25:24,829 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43963 2024-12-05T00:25:24,831 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43963 connecting to ZooKeeper ensemble=127.0.0.1:54477 2024-12-05T00:25:24,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:439630x0, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T00:25:24,839 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43963-0x1018002f6bd0000 connected 2024-12-05T00:25:24,884 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:25:24,887 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:25:24,890 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:25:24,891 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8, hbase.cluster.distributed=false 2024-12-05T00:25:24,895 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T00:25:24,900 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43963 2024-12-05T00:25:24,901 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43963 2024-12-05T00:25:24,901 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43963 2024-12-05T00:25:24,904 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43963 2024-12-05T00:25:24,905 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43963 2024-12-05T00:25:24,931 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2113c16e5528:0 server-side Connection retries=45 2024-12-05T00:25:24,932 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:25:24,932 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T00:25:24,932 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T00:25:24,932 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:25:24,932 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T00:25:24,932 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T00:25:24,932 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T00:25:24,933 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37749 2024-12-05T00:25:24,935 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37749 connecting to ZooKeeper ensemble=127.0.0.1:54477 2024-12-05T00:25:24,936 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:25:24,939 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:25:24,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:377490x0, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T00:25:24,945 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37749-0x1018002f6bd0001 connected 2024-12-05T00:25:24,945 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37749-0x1018002f6bd0001, quorum=127.0.0.1:54477, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:25:24,946 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-05T00:25:24,948 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T00:25:24,949 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37749-0x1018002f6bd0001, quorum=127.0.0.1:54477, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T00:25:24,950 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37749-0x1018002f6bd0001, quorum=127.0.0.1:54477, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T00:25:24,953 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37749 2024-12-05T00:25:24,956 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37749 2024-12-05T00:25:24,960 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37749 2024-12-05T00:25:24,968 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37749 2024-12-05T00:25:24,969 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37749 2024-12-05T00:25:24,989 DEBUG [M:0;2113c16e5528:43963 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;2113c16e5528:43963 2024-12-05T00:25:24,990 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/2113c16e5528,43963,1733358324824 2024-12-05T00:25:24,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:25:24,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1018002f6bd0001, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:25:24,993 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/2113c16e5528,43963,1733358324824 2024-12-05T00:25:24,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1018002f6bd0001, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T00:25:24,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:24,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1018002f6bd0001, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:24,995 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-05T00:25:24,996 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/2113c16e5528,43963,1733358324824 from backup master directory 2024-12-05T00:25:24,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/2113c16e5528,43963,1733358324824 2024-12-05T00:25:24,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1018002f6bd0001, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:25:24,997 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:25:24,998 WARN [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T00:25:24,998 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=2113c16e5528,43963,1733358324824 2024-12-05T00:25:25,004 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/hbase.id] with ID: 310224bd-22c8-4de5-9538-72faf04f48b2 2024-12-05T00:25:25,004 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/.tmp/hbase.id 2024-12-05T00:25:25,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741826_1002 (size=42) 2024-12-05T00:25:25,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35205 is added to blk_1073741826_1002 (size=42) 2024-12-05T00:25:25,012 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/.tmp/hbase.id]:[hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/hbase.id] 2024-12-05T00:25:25,028 INFO [master/2113c16e5528:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:25:25,028 INFO [master/2113c16e5528:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-05T00:25:25,030 INFO [master/2113c16e5528:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
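
The cluster ID lines just above follow a write-to-temp-then-rename pattern: FSUtils writes .tmp/hbase.id on HDFS and then moves it to hbase.id. The sketch below illustrates that pattern with the standard Hadoop FileSystem API only; the helper name and the example path are hypothetical, and this is not the FSUtils implementation itself.

// Illustrative sketch of the temp-write-then-rename pattern used for hbase.id above.
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
  static void writeViaTemp(FileSystem fs, Path target, String clusterId) throws Exception {
    Path tmp = new Path(target.getParent(), ".tmp/" + target.getName());
    try (FSDataOutputStream out = fs.create(tmp, true)) {   // write the id to a temporary file first
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, target)) {                          // then move it to its final location
      throw new java.io.IOException("rename " + tmp + " -> " + target + " failed");
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();               // picks up fs.defaultFS (an hdfs://localhost URI in this log)
    try (FileSystem fs = FileSystem.get(conf)) {
      writeViaTemp(fs, new Path("/user/jenkins/test-data/hbase.id"),
          java.util.UUID.randomUUID().toString());
    }
  }
}
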
2024-12-05T00:25:25,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1018002f6bd0001, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:25,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:25,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35205 is added to blk_1073741827_1003 (size=196) 2024-12-05T00:25:25,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741827_1003 (size=196) 2024-12-05T00:25:25,049 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T00:25:25,050 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-05T00:25:25,050 INFO [master/2113c16e5528:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T00:25:25,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741828_1004 (size=1189) 2024-12-05T00:25:25,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35205 is added to blk_1073741828_1004 (size=1189) 2024-12-05T00:25:25,064 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/data/master/store 2024-12-05T00:25:25,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741829_1005 (size=34) 2024-12-05T00:25:25,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35205 is added to blk_1073741829_1005 (size=34) 2024-12-05T00:25:25,072 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:25:25,073 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T00:25:25,073 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:25:25,073 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:25:25,073 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T00:25:25,073 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:25:25,073 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
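
The descriptor printed above for the local 'master:store' region has an 'info' family (3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory, 8 KB blocks) plus 'proc', 'rs' and 'state' families with default settings and 64 KB blocks. A sketch of assembling an equivalent descriptor with the public HBase client builders follows; the family settings are copied from the logged descriptor, but this is illustrative and not the MasterRegion code that emitted these lines.

// Sketch of a table descriptor equivalent to the 'master:store' one logged above.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => 8 KB
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))  // defaults: 1 version, 64 KB blocks
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
        .build();
  }

  public static void main(String[] args) {
    System.out.println(build());   // prints a descriptor similar to the one in the log
  }
}
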
2024-12-05T00:25:25,073 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733358325073Disabling compacts and flushes for region at 1733358325073Disabling writes for close at 1733358325073Writing region close event to WAL at 1733358325073Closed at 1733358325073 2024-12-05T00:25:25,074 WARN [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/data/master/store/.initializing 2024-12-05T00:25:25,074 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/WALs/2113c16e5528,43963,1733358324824 2024-12-05T00:25:25,077 INFO [master/2113c16e5528:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2113c16e5528%2C43963%2C1733358324824, suffix=, logDir=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/WALs/2113c16e5528,43963,1733358324824, archiveDir=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/oldWALs, maxLogs=10 2024-12-05T00:25:25,078 INFO [master/2113c16e5528:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C43963%2C1733358324824.1733358325078 2024-12-05T00:25:25,092 INFO [master/2113c16e5528:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/WALs/2113c16e5528,43963,1733358324824/2113c16e5528%2C43963%2C1733358324824.1733358325078 2024-12-05T00:25:25,100 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36003:36003),(127.0.0.1/127.0.0.1:34567:34567)] 2024-12-05T00:25:25,104 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-05T00:25:25,105 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:25:25,105 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:25:25,105 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:25:25,108 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:25:25,111 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-05T00:25:25,111 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:25:25,112 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:25:25,112 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:25:25,114 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-05T00:25:25,114 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:25:25,115 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:25:25,115 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:25:25,116 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-05T00:25:25,116 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:25:25,117 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:25:25,117 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:25:25,120 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-05T00:25:25,120 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:25:25,121 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:25:25,121 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:25:25,122 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:25:25,122 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:25:25,124 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:25:25,125 DEBUG [master/2113c16e5528:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:25:25,125 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T00:25:25,127 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:25:25,131 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T00:25:25,132 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=862737, jitterRate=0.09702780842781067}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T00:25:25,133 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733358325105Initializing all the Stores at 1733358325107 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358325107Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358325108 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358325108Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358325108Cleaning up temporary data from old regions at 1733358325125 (+17 ms)Region opened successfully at 1733358325133 (+8 ms) 2024-12-05T00:25:25,133 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-05T00:25:25,140 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8003169, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2113c16e5528/172.17.0.2:0 2024-12-05T00:25:25,141 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-05T00:25:25,141 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-05T00:25:25,142 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-05T00:25:25,142 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-05T00:25:25,143 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-05T00:25:25,143 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-05T00:25:25,143 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-05T00:25:25,147 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-05T00:25:25,148 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-05T00:25:25,150 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-05T00:25:25,150 INFO [master/2113c16e5528:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-05T00:25:25,151 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-05T00:25:25,152 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-05T00:25:25,153 INFO [master/2113c16e5528:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-05T00:25:25,160 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-05T00:25:25,161 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-05T00:25:25,162 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-05T00:25:25,165 DEBUG 
[master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-05T00:25:25,177 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-05T00:25:25,178 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-05T00:25:25,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T00:25:25,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1018002f6bd0001, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T00:25:25,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:25,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1018002f6bd0001, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:25,181 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=2113c16e5528,43963,1733358324824, sessionid=0x1018002f6bd0000, setting cluster-up flag (Was=false) 2024-12-05T00:25:25,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:25,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1018002f6bd0001, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:25,191 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-05T00:25:25,193 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2113c16e5528,43963,1733358324824 2024-12-05T00:25:25,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1018002f6bd0001, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:25,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:25,209 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-05T00:25:25,212 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2113c16e5528,43963,1733358324824 2024-12-05T00:25:25,214 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-05T00:25:25,217 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-05T00:25:25,217 INFO [master/2113c16e5528:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-05T00:25:25,218 INFO [master/2113c16e5528:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-05T00:25:25,218 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 2113c16e5528,43963,1733358324824 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-05T00:25:25,219 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:25:25,219 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:25:25,220 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:25:25,220 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:25:25,220 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/2113c16e5528:0, corePoolSize=10, maxPoolSize=10 2024-12-05T00:25:25,220 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:25,220 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/2113c16e5528:0, corePoolSize=2, maxPoolSize=2 2024-12-05T00:25:25,220 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/2113c16e5528:0, corePoolSize=1, 
maxPoolSize=1 2024-12-05T00:25:25,222 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:25:25,222 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-05T00:25:25,225 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:25:25,226 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-05T00:25:25,227 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733358355227 2024-12-05T00:25:25,227 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-05T00:25:25,228 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-05T00:25:25,228 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-05T00:25:25,228 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-05T00:25:25,228 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-05T00:25:25,228 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-05T00:25:25,230 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:25,231 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-05T00:25:25,231 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-05T00:25:25,231 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-05T00:25:25,232 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-05T00:25:25,232 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-05T00:25:25,233 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.large.0-1733358325233,5,FailOnTimeoutGroup] 2024-12-05T00:25:25,233 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.small.0-1733358325233,5,FailOnTimeoutGroup] 2024-12-05T00:25:25,233 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:25,233 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-05T00:25:25,233 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:25,233 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
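The hbase:meta table descriptor created a few entries above (column families info, ns, rep_barrier and table, ROWCOL bloom filters, ROW_INDEX_V1 data block encoding, 8 KB or 64 KB block sizes, in-memory caching for the small families) maps directly onto the public HBase client builders. Below is a minimal sketch, assuming the standard org.apache.hadoop.hbase.client API; it only mirrors the 'info' family from the logged descriptor and uses a hypothetical table name, since hbase:meta itself is created internally by the master, not by clients.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  public static void main(String[] args) {
    // Mirrors the 'info' family from the logged descriptor:
    // VERSIONS=3, BLOOMFILTER=ROWCOL, IN_MEMORY=true,
    // DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOCKSIZE=8192.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8192)
        .build();

    // Hypothetical table name used purely for illustration.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo", "meta_like"))
        .setColumnFamily(info)
        .build();

    System.out.println(td);
  }
}
```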
2024-12-05T00:25:25,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35205 is added to blk_1073741831_1007 (size=1321) 2024-12-05T00:25:25,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741831_1007 (size=1321) 2024-12-05T00:25:25,249 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-05T00:25:25,249 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8 2024-12-05T00:25:25,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35205 is added to blk_1073741832_1008 (size=32) 2024-12-05T00:25:25,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741832_1008 (size=32) 2024-12-05T00:25:25,265 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:25:25,268 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T00:25:25,270 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T00:25:25,271 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:25:25,275 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:25:25,275 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T00:25:25,276 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-05T00:25:25,278 INFO [RS:0;2113c16e5528:37749 {}] regionserver.HRegionServer(746): ClusterId : 310224bd-22c8-4de5-9538-72faf04f48b2 2024-12-05T00:25:25,278 DEBUG [RS:0;2113c16e5528:37749 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T00:25:25,279 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T00:25:25,279 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:25:25,283 DEBUG [RS:0;2113c16e5528:37749 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T00:25:25,283 DEBUG [RS:0;2113c16e5528:37749 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T00:25:25,285 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:25:25,285 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, 
for column family rep_barrier of region 1588230740 2024-12-05T00:25:25,285 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:25:25,289 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T00:25:25,289 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:25:25,293 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:25:25,293 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T00:25:25,293 DEBUG [RS:0;2113c16e5528:37749 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T00:25:25,296 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T00:25:25,296 DEBUG [RS:0;2113c16e5528:37749 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9e33cce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2113c16e5528/172.17.0.2:0 2024-12-05T00:25:25,296 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:25:25,307 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
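The CompactionConfiguration lines repeated above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0) describe ratio-based file selection. As a simplified, standalone illustration of that ratio test only, the sketch below applies the usual rule that a file may join a candidate selection if its size is at most ratio times the combined size of the other files; it is not the actual ExploringCompactionPolicy code.

```java
import java.util.List;

public class RatioCheckSketch {
  /** Returns true if every file in the candidate selection passes the ratio test. */
  static boolean passesRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      long others = total - size;
      if (size > ratio * others) {
        return false; // one file dominates the selection
      }
    }
    return true;
  }

  public static void main(String[] args) {
    double ratio = 1.2; // from the logged configuration
    // Three similarly sized files: eligible.
    System.out.println(passesRatio(List.of(10L, 12L, 11L), ratio));   // true
    // One very large file next to two small ones: rejected at ratio 1.2.
    System.out.println(passesRatio(List.of(100L, 12L, 11L), ratio));  // false
  }
}
```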
2024-12-05T00:25:25,307 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T00:25:25,309 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/hbase/meta/1588230740 2024-12-05T00:25:25,309 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/hbase/meta/1588230740 2024-12-05T00:25:25,312 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T00:25:25,312 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T00:25:25,313 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-05T00:25:25,315 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T00:25:25,317 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:25:25,323 DEBUG [RS:0;2113c16e5528:37749 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;2113c16e5528:37749 2024-12-05T00:25:25,323 INFO [RS:0;2113c16e5528:37749 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T00:25:25,323 INFO [RS:0;2113c16e5528:37749 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T00:25:25,323 DEBUG [RS:0;2113c16e5528:37749 {}] regionserver.HRegionServer(832): About to register with Master. 
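The FlushLargeStoresPolicy entries spell out their fallback: with no hbase.hregion.percolumnfamilyflush.size.lower.bound set, the per-family lower bound becomes the region's memstore flush size divided by the number of families (16.0 M here for hbase:meta, 32.0 M earlier for the master's local store, both regions having four families). A tiny sketch of that arithmetic follows; the 64 MB and 128 MB flush sizes are inferred from the logged bounds and the flushSizeLowerBound values, not read from the test configuration.

```java
public class FlushLowerBoundSketch {
  /** Per-family flush lower bound = region memstore flush size / number of families. */
  static long perFamilyLowerBound(long memstoreFlushSizeBytes, int numFamilies) {
    return memstoreFlushSizeBytes / numFamilies;
  }

  public static void main(String[] args) {
    long mb = 1024L * 1024L;
    // master:store region, 4 families (info, proc, rs, state) -> 32 MB, matching the log.
    System.out.println(perFamilyLowerBound(128 * mb, 4) / mb + " MB");
    // hbase:meta region, 4 families (info, ns, rep_barrier, table) -> 16 MB, matching the log.
    System.out.println(perFamilyLowerBound(64 * mb, 4) / mb + " MB");
  }
}
```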
2024-12-05T00:25:25,324 INFO [RS:0;2113c16e5528:37749 {}] regionserver.HRegionServer(2659): reportForDuty to master=2113c16e5528,43963,1733358324824 with port=37749, startcode=1733358324931 2024-12-05T00:25:25,325 DEBUG [RS:0;2113c16e5528:37749 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T00:25:25,329 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T00:25:25,330 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=691614, jitterRate=-0.1205676794052124}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-05T00:25:25,331 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733358325265Initializing all the Stores at 1733358325266 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358325266Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358325268 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358325268Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358325268Cleaning up temporary data from old regions at 1733358325313 (+45 ms)Region opened successfully at 1733358325331 (+18 ms) 2024-12-05T00:25:25,331 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T00:25:25,331 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T00:25:25,332 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T00:25:25,332 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T00:25:25,332 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T00:25:25,335 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 
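The "Opened ... ConstantSizeRegionSplitPolicy{desiredMaxFileSize=..., jitterRate=...}" entries are consistent with a jittered split threshold of the form desiredMaxFileSize = base * (1 + jitterRate). All three tuples in this log (862737 at +0.0970 near the top of this section, 691614 at -0.1206 just above, 877642 at +0.1160 later when meta is reopened on the region server) back-solve to the same base of 786432 bytes (768 KB), presumably the max file size configured for this test; treat that base as inferred, not confirmed. A short sketch of the arithmetic:

```java
public class SplitJitterSketch {
  /** desiredMaxFileSize = base + (long)(base * jitterRate). */
  static long desiredMaxFileSize(long baseBytes, double jitterRate) {
    return baseBytes + (long) (baseBytes * jitterRate);
  }

  public static void main(String[] args) {
    long base = 786_432L; // 768 KB, inferred from the logged values
    System.out.println(desiredMaxFileSize(base, 0.09702780842781067));  // 862737
    System.out.println(desiredMaxFileSize(base, -0.1205676794052124));  // 691614
    System.out.println(desiredMaxFileSize(base, 0.11598049104213715));  // 877642
  }
}
```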
2024-12-05T00:25:25,335 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733358325331Disabling compacts and flushes for region at 1733358325331Disabling writes for close at 1733358325332 (+1 ms)Writing region close event to WAL at 1733358325335 (+3 ms)Closed at 1733358325335 2024-12-05T00:25:25,336 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:25:25,337 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:25:25,337 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-05T00:25:25,337 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:25:25,337 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-05T00:25:25,340 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T00:25:25,342 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-05T00:25:25,349 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T00:25:25,349 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-05T00:25:25,349 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-05T00:25:25,350 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55399, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T00:25:25,351 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43963 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2113c16e5528,37749,1733358324931 2024-12-05T00:25:25,351 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43963 {}] master.ServerManager(517): Registering regionserver=2113c16e5528,37749,1733358324931 2024-12-05T00:25:25,357 DEBUG [RS:0;2113c16e5528:37749 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8 2024-12-05T00:25:25,357 DEBUG [RS:0;2113c16e5528:37749 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41383 2024-12-05T00:25:25,357 DEBUG 
[RS:0;2113c16e5528:37749 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T00:25:25,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T00:25:25,360 DEBUG [RS:0;2113c16e5528:37749 {}] zookeeper.ZKUtil(111): regionserver:37749-0x1018002f6bd0001, quorum=127.0.0.1:54477, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2113c16e5528,37749,1733358324931 2024-12-05T00:25:25,360 WARN [RS:0;2113c16e5528:37749 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T00:25:25,360 INFO [RS:0;2113c16e5528:37749 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T00:25:25,360 DEBUG [RS:0;2113c16e5528:37749 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931 2024-12-05T00:25:25,365 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2113c16e5528,37749,1733358324931] 2024-12-05T00:25:25,372 INFO [RS:0;2113c16e5528:37749 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T00:25:25,376 INFO [RS:0;2113c16e5528:37749 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T00:25:25,380 INFO [RS:0;2113c16e5528:37749 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T00:25:25,381 INFO [RS:0;2113c16e5528:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:25,381 INFO [RS:0;2113c16e5528:37749 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T00:25:25,382 INFO [RS:0;2113c16e5528:37749 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T00:25:25,382 INFO [RS:0;2113c16e5528:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
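The MemStoreFlusher line above (globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false) fits the usual sizing rule: the global limit is a fraction of the heap, and the low-water mark is a fraction of that limit. The 836/880 ratio is exactly 0.95; the 0.4 heap fraction (which would put this JVM's heap around 2.2 GB) is an assumption based on common defaults, not something the log states. A sketch of the computation:

```java
public class GlobalMemstoreLimitSketch {
  public static void main(String[] args) {
    long mb = 1024L * 1024L;
    long heapBytes = 2200 * mb;   // assumed heap size, not taken from the log
    double upperFraction = 0.4;   // assumed global memstore fraction of heap
    double lowerFraction = 0.95;  // assumed lower-limit fraction; 836/880 = 0.95 in the log

    long globalLimit = (long) (heapBytes * upperFraction);
    long lowMark = (long) (globalLimit * lowerFraction);

    System.out.println("globalMemStoreLimit=" + globalLimit / mb + " M"); // 880 M
    System.out.println("lowMark=" + lowMark / mb + " M");                 // 836 M
  }
}
```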
2024-12-05T00:25:25,383 DEBUG [RS:0;2113c16e5528:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:25,383 DEBUG [RS:0;2113c16e5528:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:25,383 DEBUG [RS:0;2113c16e5528:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:25,383 DEBUG [RS:0;2113c16e5528:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:25,383 DEBUG [RS:0;2113c16e5528:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:25,383 DEBUG [RS:0;2113c16e5528:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2113c16e5528:0, corePoolSize=2, maxPoolSize=2 2024-12-05T00:25:25,383 DEBUG [RS:0;2113c16e5528:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:25,383 DEBUG [RS:0;2113c16e5528:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:25,383 DEBUG [RS:0;2113c16e5528:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:25,383 DEBUG [RS:0;2113c16e5528:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:25,383 DEBUG [RS:0;2113c16e5528:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:25,384 DEBUG [RS:0;2113c16e5528:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:25,384 DEBUG [RS:0;2113c16e5528:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:25:25,384 DEBUG [RS:0;2113c16e5528:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:25:25,388 INFO [RS:0;2113c16e5528:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:25,388 INFO [RS:0;2113c16e5528:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:25,388 INFO [RS:0;2113c16e5528:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:25,388 INFO [RS:0;2113c16e5528:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
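Each ExecutorService entry above reports a corePoolSize and maxPoolSize per handler type (RS_OPEN_REGION 1/1, RS_LOG_REPLAY_OPS 2/2, RS_SNAPSHOT_OPERATIONS 3/3, and so on), and the earlier RemoteProcedureDispatcher line also notes allowCoreThreadTimeOut=true. Purely as a general illustration of such a bounded pool, and not the HBase ExecutorService implementation itself, a plain java.util.concurrent version looks like this:

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedPoolSketch {
  public static void main(String[] args) throws InterruptedException {
    // corePoolSize=3, maxPoolSize=3, idle core threads may time out after 60s.
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        3, 3, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    pool.allowCoreThreadTimeOut(true);

    for (int i = 0; i < 5; i++) {
      final int task = i;
      pool.execute(() ->
          System.out.println("task " + task + " on " + Thread.currentThread().getName()));
    }

    pool.shutdown();
    pool.awaitTermination(10, TimeUnit.SECONDS);
  }
}
```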
2024-12-05T00:25:25,388 INFO [RS:0;2113c16e5528:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:25,388 INFO [RS:0;2113c16e5528:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,37749,1733358324931-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T00:25:25,416 INFO [RS:0;2113c16e5528:37749 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T00:25:25,416 INFO [RS:0;2113c16e5528:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,37749,1733358324931-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:25,416 INFO [RS:0;2113c16e5528:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:25,416 INFO [RS:0;2113c16e5528:37749 {}] regionserver.Replication(171): 2113c16e5528,37749,1733358324931 started 2024-12-05T00:25:25,436 INFO [RS:0;2113c16e5528:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:25,436 INFO [RS:0;2113c16e5528:37749 {}] regionserver.HRegionServer(1482): Serving as 2113c16e5528,37749,1733358324931, RpcServer on 2113c16e5528/172.17.0.2:37749, sessionid=0x1018002f6bd0001 2024-12-05T00:25:25,436 DEBUG [RS:0;2113c16e5528:37749 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T00:25:25,436 DEBUG [RS:0;2113c16e5528:37749 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2113c16e5528,37749,1733358324931 2024-12-05T00:25:25,437 DEBUG [RS:0;2113c16e5528:37749 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2113c16e5528,37749,1733358324931' 2024-12-05T00:25:25,437 DEBUG [RS:0;2113c16e5528:37749 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T00:25:25,437 DEBUG [RS:0;2113c16e5528:37749 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T00:25:25,438 DEBUG [RS:0;2113c16e5528:37749 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T00:25:25,438 DEBUG [RS:0;2113c16e5528:37749 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T00:25:25,438 DEBUG [RS:0;2113c16e5528:37749 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2113c16e5528,37749,1733358324931 2024-12-05T00:25:25,438 DEBUG [RS:0;2113c16e5528:37749 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2113c16e5528,37749,1733358324931' 2024-12-05T00:25:25,438 DEBUG [RS:0;2113c16e5528:37749 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T00:25:25,438 DEBUG [RS:0;2113c16e5528:37749 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T00:25:25,439 DEBUG [RS:0;2113c16e5528:37749 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T00:25:25,439 INFO [RS:0;2113c16e5528:37749 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T00:25:25,439 INFO [RS:0;2113c16e5528:37749 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-12-05T00:25:25,492 WARN [2113c16e5528:43963 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-05T00:25:25,541 INFO [RS:0;2113c16e5528:37749 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2113c16e5528%2C37749%2C1733358324931, suffix=, logDir=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931, archiveDir=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/oldWALs, maxLogs=32 2024-12-05T00:25:25,569 INFO [RS:0;2113c16e5528:37749 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C37749%2C1733358324931.1733358325568 2024-12-05T00:25:25,576 INFO [RS:0;2113c16e5528:37749 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358325568 2024-12-05T00:25:25,580 DEBUG [RS:0;2113c16e5528:37749 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36003:36003),(127.0.0.1/127.0.0.1:34567:34567)] 2024-12-05T00:25:25,743 DEBUG [2113c16e5528:43963 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-05T00:25:25,743 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=2113c16e5528,37749,1733358324931 2024-12-05T00:25:25,745 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2113c16e5528,37749,1733358324931, state=OPENING 2024-12-05T00:25:25,748 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-05T00:25:25,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:25,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1018002f6bd0001, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:25:25,750 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T00:25:25,750 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:25:25,750 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:25:25,750 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=2113c16e5528,37749,1733358324931}] 2024-12-05T00:25:25,904 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T00:25:25,906 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43107, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T00:25:25,910 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-05T00:25:25,910 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T00:25:25,912 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2113c16e5528%2C37749%2C1733358324931.meta, suffix=.meta, logDir=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931, archiveDir=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/oldWALs, maxLogs=32 2024-12-05T00:25:25,913 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta 2024-12-05T00:25:25,919 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta 2024-12-05T00:25:25,922 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34567:34567),(127.0.0.1/127.0.0.1:36003:36003)] 2024-12-05T00:25:25,926 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-05T00:25:25,926 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-05T00:25:25,926 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-05T00:25:25,926 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
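The coprocessor lines above show org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint being loaded because it is declared in the hbase:meta table descriptor (the coprocessor$1 attribute logged earlier). For a user table, the same declaration can be made through the public TableDescriptorBuilder API; below is a minimal sketch with a hypothetical table name, assuming the coprocessor class is available on the region servers' classpath.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorDescriptorSketch {
  public static void main(String[] args) throws Exception {
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo_table")) // hypothetical table
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        // Same endpoint class the log shows attached to hbase:meta.
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();
    System.out.println(td);
  }
}
```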
2024-12-05T00:25:25,926 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-05T00:25:25,926 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:25:25,926 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-05T00:25:25,926 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-05T00:25:25,928 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T00:25:25,930 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T00:25:25,930 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:25:25,930 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:25:25,930 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T00:25:25,932 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T00:25:25,932 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:25:25,932 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:25:25,932 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T00:25:25,933 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T00:25:25,933 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:25:25,934 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:25:25,934 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T00:25:25,935 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T00:25:25,935 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:25:25,936 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
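Every store opened above logs "instantiating StoreFileTracker impl ... DefaultStoreFileTracker", which matches the METADATA entry 'hbase.store.file-tracker.impl' => 'DEFAULT' carried by the table descriptor. That setting is just a descriptor property, so the sketch below shows it declared explicitly on a hypothetical table, using only the key and value that appear in this log.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class StoreFileTrackerSketch {
  public static void main(String[] args) {
    // The same key/value pair that shows up under METADATA in the logged descriptors.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo_sft")) // hypothetical table
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setValue("hbase.store.file-tracker.impl", "DEFAULT")
        .build();
    System.out.println(td.getValue("hbase.store.file-tracker.impl")); // DEFAULT
  }
}
```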
2024-12-05T00:25:25,936 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T00:25:25,937 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/hbase/meta/1588230740 2024-12-05T00:25:25,938 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/hbase/meta/1588230740 2024-12-05T00:25:25,939 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T00:25:25,939 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T00:25:25,940 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-05T00:25:25,941 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T00:25:25,942 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=877642, jitterRate=0.11598049104213715}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-05T00:25:25,943 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-05T00:25:25,943 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733358325927Writing region info on filesystem at 1733358325927Initializing all the Stores at 1733358325928 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358325928Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358325928Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358325928Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358325928Cleaning up temporary data from old regions at 1733358325939 (+11 ms)Running coprocessor post-open hooks at 1733358325943 (+4 ms)Region opened successfully at 1733358325943 2024-12-05T00:25:25,944 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733358325903 2024-12-05T00:25:25,948 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-05T00:25:25,948 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-05T00:25:25,949 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,37749,1733358324931 2024-12-05T00:25:25,950 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2113c16e5528,37749,1733358324931, state=OPEN 2024-12-05T00:25:25,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1018002f6bd0001, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T00:25:25,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T00:25:25,957 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=2113c16e5528,37749,1733358324931 2024-12-05T00:25:25,957 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:25:25,957 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:25:25,961 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-05T00:25:25,961 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=2113c16e5528,37749,1733358324931 in 207 msec 2024-12-05T00:25:25,964 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-05T00:25:25,964 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 624 msec 2024-12-05T00:25:25,965 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:25:25,965 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-05T00:25:25,967 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T00:25:25,967 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,37749,1733358324931, seqNum=-1] 2024-12-05T00:25:25,968 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T00:25:25,969 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33891, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T00:25:25,977 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 759 msec 2024-12-05T00:25:25,977 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733358325977, completionTime=-1 2024-12-05T00:25:25,977 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-05T00:25:25,977 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-05T00:25:25,980 INFO [master/2113c16e5528:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-05T00:25:25,980 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733358385980 2024-12-05T00:25:25,980 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733358445980 2024-12-05T00:25:25,980 INFO [master/2113c16e5528:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-05T00:25:25,980 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,43963,1733358324824-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:25,981 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,43963,1733358324824-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:25,981 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,43963,1733358324824-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:25,981 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-2113c16e5528:43963, period=300000, unit=MILLISECONDS is enabled. 
2024-12-05T00:25:25,981 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:25,982 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:25,983 DEBUG [master/2113c16e5528:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-05T00:25:25,986 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.988sec 2024-12-05T00:25:25,986 INFO [master/2113c16e5528:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-05T00:25:25,986 INFO [master/2113c16e5528:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-05T00:25:25,986 INFO [master/2113c16e5528:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-05T00:25:25,986 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-05T00:25:25,986 INFO [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-05T00:25:25,987 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,43963,1733358324824-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T00:25:25,987 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,43963,1733358324824-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-05T00:25:25,990 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-05T00:25:25,990 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-05T00:25:25,990 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,43963,1733358324824-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-05T00:25:26,078 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cd7b3e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:25:26,078 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,43963,-1 for getting cluster id 2024-12-05T00:25:26,078 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T00:25:26,080 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '310224bd-22c8-4de5-9538-72faf04f48b2' 2024-12-05T00:25:26,081 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T00:25:26,081 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "310224bd-22c8-4de5-9538-72faf04f48b2" 2024-12-05T00:25:26,081 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d65fa4a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:25:26,081 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,43963,-1] 2024-12-05T00:25:26,082 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T00:25:26,082 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:25:26,083 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54438, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T00:25:26,084 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@abbe752, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:25:26,085 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T00:25:26,086 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,37749,1733358324931, seqNum=-1] 2024-12-05T00:25:26,086 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T00:25:26,088 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34904, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T00:25:26,090 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=2113c16e5528,43963,1733358324824 2024-12-05T00:25:26,090 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:25:26,093 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-05T00:25:26,109 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2113c16e5528:0 server-side Connection retries=45 2024-12-05T00:25:26,109 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:25:26,109 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T00:25:26,109 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T00:25:26,109 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:25:26,109 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T00:25:26,109 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T00:25:26,109 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T00:25:26,110 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45031 2024-12-05T00:25:26,111 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45031 connecting to ZooKeeper ensemble=127.0.0.1:54477 2024-12-05T00:25:26,112 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:25:26,114 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:25:26,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:450310x0, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T00:25:26,119 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:450310x0, quorum=127.0.0.1:54477, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-12-05T00:25:26,119 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45031-0x1018002f6bd0002 connected 2024-12-05T00:25:26,119 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-12-05T00:25:26,120 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-05T00:25:26,122 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T00:25:26,122 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(111): regionserver:45031-0x1018002f6bd0002, quorum=127.0.0.1:54477, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-05T00:25:26,124 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45031-0x1018002f6bd0002, quorum=127.0.0.1:54477, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T00:25:26,129 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45031 2024-12-05T00:25:26,131 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45031 2024-12-05T00:25:26,132 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45031 2024-12-05T00:25:26,133 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45031 2024-12-05T00:25:26,133 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45031 2024-12-05T00:25:26,134 INFO [RS:1;2113c16e5528:45031 {}] regionserver.HRegionServer(746): ClusterId : 310224bd-22c8-4de5-9538-72faf04f48b2 2024-12-05T00:25:26,134 DEBUG [RS:1;2113c16e5528:45031 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T00:25:26,137 DEBUG [RS:1;2113c16e5528:45031 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T00:25:26,137 DEBUG [RS:1;2113c16e5528:45031 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T00:25:26,138 DEBUG [RS:1;2113c16e5528:45031 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T00:25:26,139 DEBUG [RS:1;2113c16e5528:45031 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c027df9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2113c16e5528/172.17.0.2:0 2024-12-05T00:25:26,152 DEBUG [RS:1;2113c16e5528:45031 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;2113c16e5528:45031 2024-12-05T00:25:26,152 INFO [RS:1;2113c16e5528:45031 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T00:25:26,152 INFO [RS:1;2113c16e5528:45031 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T00:25:26,152 DEBUG [RS:1;2113c16e5528:45031 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-05T00:25:26,153 INFO [RS:1;2113c16e5528:45031 {}] regionserver.HRegionServer(2659): reportForDuty to master=2113c16e5528,43963,1733358324824 with port=45031, startcode=1733358326108 2024-12-05T00:25:26,153 DEBUG [RS:1;2113c16e5528:45031 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T00:25:26,155 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51289, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T00:25:26,156 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43963 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2113c16e5528,45031,1733358326108 2024-12-05T00:25:26,156 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43963 {}] master.ServerManager(517): Registering regionserver=2113c16e5528,45031,1733358326108 2024-12-05T00:25:26,158 DEBUG [RS:1;2113c16e5528:45031 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8 2024-12-05T00:25:26,158 DEBUG [RS:1;2113c16e5528:45031 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41383 2024-12-05T00:25:26,158 DEBUG [RS:1;2113c16e5528:45031 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T00:25:26,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T00:25:26,160 DEBUG [RS:1;2113c16e5528:45031 {}] zookeeper.ZKUtil(111): regionserver:45031-0x1018002f6bd0002, quorum=127.0.0.1:54477, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2113c16e5528,45031,1733358326108 2024-12-05T00:25:26,160 WARN [RS:1;2113c16e5528:45031 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T00:25:26,160 INFO [RS:1;2113c16e5528:45031 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T00:25:26,160 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2113c16e5528,45031,1733358326108] 2024-12-05T00:25:26,160 DEBUG [RS:1;2113c16e5528:45031 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108 2024-12-05T00:25:26,164 INFO [RS:1;2113c16e5528:45031 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T00:25:26,166 INFO [RS:1;2113c16e5528:45031 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T00:25:26,166 INFO [RS:1;2113c16e5528:45031 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T00:25:26,167 INFO [RS:1;2113c16e5528:45031 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-05T00:25:26,168 INFO [RS:1;2113c16e5528:45031 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T00:25:26,169 INFO [RS:1;2113c16e5528:45031 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T00:25:26,169 INFO [RS:1;2113c16e5528:45031 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:26,170 DEBUG [RS:1;2113c16e5528:45031 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:26,170 DEBUG [RS:1;2113c16e5528:45031 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:26,170 DEBUG [RS:1;2113c16e5528:45031 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:26,170 DEBUG [RS:1;2113c16e5528:45031 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:26,170 DEBUG [RS:1;2113c16e5528:45031 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:26,170 DEBUG [RS:1;2113c16e5528:45031 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2113c16e5528:0, corePoolSize=2, maxPoolSize=2 2024-12-05T00:25:26,170 DEBUG [RS:1;2113c16e5528:45031 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:26,170 DEBUG [RS:1;2113c16e5528:45031 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:26,170 DEBUG [RS:1;2113c16e5528:45031 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:26,170 DEBUG [RS:1;2113c16e5528:45031 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:26,170 DEBUG [RS:1;2113c16e5528:45031 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:26,170 DEBUG [RS:1;2113c16e5528:45031 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:25:26,170 DEBUG [RS:1;2113c16e5528:45031 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:25:26,171 DEBUG [RS:1;2113c16e5528:45031 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:25:26,171 INFO [RS:1;2113c16e5528:45031 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-05T00:25:26,171 INFO [RS:1;2113c16e5528:45031 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:26,171 INFO [RS:1;2113c16e5528:45031 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:26,171 INFO [RS:1;2113c16e5528:45031 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:26,171 INFO [RS:1;2113c16e5528:45031 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:26,171 INFO [RS:1;2113c16e5528:45031 {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,45031,1733358326108-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T00:25:26,187 INFO [RS:1;2113c16e5528:45031 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T00:25:26,187 INFO [RS:1;2113c16e5528:45031 {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,45031,1733358326108-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:26,187 INFO [RS:1;2113c16e5528:45031 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:26,187 INFO [RS:1;2113c16e5528:45031 {}] regionserver.Replication(171): 2113c16e5528,45031,1733358326108 started 2024-12-05T00:25:26,201 INFO [RS:1;2113c16e5528:45031 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:25:26,202 INFO [RS:1;2113c16e5528:45031 {}] regionserver.HRegionServer(1482): Serving as 2113c16e5528,45031,1733358326108, RpcServer on 2113c16e5528/172.17.0.2:45031, sessionid=0x1018002f6bd0002 2024-12-05T00:25:26,202 DEBUG [RS:1;2113c16e5528:45031 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T00:25:26,202 DEBUG [RS:1;2113c16e5528:45031 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2113c16e5528,45031,1733358326108 2024-12-05T00:25:26,202 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;2113c16e5528:45031,5,FailOnTimeoutGroup] 2024-12-05T00:25:26,202 DEBUG [RS:1;2113c16e5528:45031 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2113c16e5528,45031,1733358326108' 2024-12-05T00:25:26,202 DEBUG [RS:1;2113c16e5528:45031 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T00:25:26,202 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-12-05T00:25:26,203 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-05T00:25:26,203 DEBUG [RS:1;2113c16e5528:45031 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T00:25:26,203 DEBUG [RS:1;2113c16e5528:45031 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T00:25:26,203 DEBUG [RS:1;2113c16e5528:45031 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T00:25:26,203 DEBUG [RS:1;2113c16e5528:45031 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
2113c16e5528,45031,1733358326108 2024-12-05T00:25:26,203 DEBUG [RS:1;2113c16e5528:45031 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2113c16e5528,45031,1733358326108' 2024-12-05T00:25:26,203 DEBUG [RS:1;2113c16e5528:45031 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T00:25:26,204 DEBUG [RS:1;2113c16e5528:45031 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T00:25:26,204 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 2113c16e5528,43963,1733358324824 2024-12-05T00:25:26,204 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@cb496b3 2024-12-05T00:25:26,204 DEBUG [RS:1;2113c16e5528:45031 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T00:25:26,204 INFO [RS:1;2113c16e5528:45031 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T00:25:26,204 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-05T00:25:26,204 INFO [RS:1;2113c16e5528:45031 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T00:25:26,206 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54442, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-05T00:25:26,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43963 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-05T00:25:26,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43963 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-05T00:25:26,207 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43963 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T00:25:26,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43963 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-12-05T00:25:26,211 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T00:25:26,211 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:25:26,211 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43963 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-12-05T00:25:26,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43963 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T00:25:26,213 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T00:25:26,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741835_1011 (size=393) 2024-12-05T00:25:26,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35205 is added to blk_1073741835_1011 (size=393) 2024-12-05T00:25:26,222 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 8b4f65ad1ec496c8c72c57384036660f, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8 2024-12-05T00:25:26,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741836_1012 (size=76) 2024-12-05T00:25:26,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35205 is added to blk_1073741836_1012 (size=76) 2024-12-05T00:25:26,230 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:25:26,231 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 8b4f65ad1ec496c8c72c57384036660f, disabling compactions & flushes 2024-12-05T00:25:26,231 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f. 2024-12-05T00:25:26,231 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f. 2024-12-05T00:25:26,231 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f. after waiting 0 ms 2024-12-05T00:25:26,231 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f. 2024-12-05T00:25:26,231 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f. 2024-12-05T00:25:26,231 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 8b4f65ad1ec496c8c72c57384036660f: Waiting for close lock at 1733358326230Disabling compacts and flushes for region at 1733358326230Disabling writes for close at 1733358326231 (+1 ms)Writing region close event to WAL at 1733358326231Closed at 1733358326231 2024-12-05T00:25:26,233 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T00:25:26,233 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733358326233"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733358326233"}]},"ts":"1733358326233"} 2024-12-05T00:25:26,236 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-05T00:25:26,238 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T00:25:26,238 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733358326238"}]},"ts":"1733358326238"} 2024-12-05T00:25:26,241 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-12-05T00:25:26,242 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8b4f65ad1ec496c8c72c57384036660f, ASSIGN}] 2024-12-05T00:25:26,243 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8b4f65ad1ec496c8c72c57384036660f, ASSIGN 2024-12-05T00:25:26,245 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8b4f65ad1ec496c8c72c57384036660f, ASSIGN; state=OFFLINE, location=2113c16e5528,37749,1733358324931; forceNewPlan=false, retain=false 2024-12-05T00:25:26,307 INFO [RS:1;2113c16e5528:45031 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2113c16e5528%2C45031%2C1733358326108, suffix=, logDir=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108, archiveDir=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/oldWALs, maxLogs=32 2024-12-05T00:25:26,308 INFO [RS:1;2113c16e5528:45031 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C45031%2C1733358326108.1733358326307 2024-12-05T00:25:26,318 INFO [RS:1;2113c16e5528:45031 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 2024-12-05T00:25:26,323 DEBUG [RS:1;2113c16e5528:45031 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36003:36003),(127.0.0.1/127.0.0.1:34567:34567)] 2024-12-05T00:25:26,396 INFO [2113c16e5528:43963 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-05T00:25:26,396 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8b4f65ad1ec496c8c72c57384036660f, regionState=OPENING, regionLocation=2113c16e5528,37749,1733358324931 2024-12-05T00:25:26,399 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8b4f65ad1ec496c8c72c57384036660f, ASSIGN because future has completed 2024-12-05T00:25:26,400 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8b4f65ad1ec496c8c72c57384036660f, server=2113c16e5528,37749,1733358324931}] 2024-12-05T00:25:26,559 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f. 2024-12-05T00:25:26,559 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 8b4f65ad1ec496c8c72c57384036660f, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f.', STARTKEY => '', ENDKEY => ''} 2024-12-05T00:25:26,560 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 8b4f65ad1ec496c8c72c57384036660f 2024-12-05T00:25:26,560 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:25:26,560 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 8b4f65ad1ec496c8c72c57384036660f 2024-12-05T00:25:26,560 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 8b4f65ad1ec496c8c72c57384036660f 2024-12-05T00:25:26,561 INFO [StoreOpener-8b4f65ad1ec496c8c72c57384036660f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 8b4f65ad1ec496c8c72c57384036660f 2024-12-05T00:25:26,563 INFO [StoreOpener-8b4f65ad1ec496c8c72c57384036660f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8b4f65ad1ec496c8c72c57384036660f columnFamilyName info 2024-12-05T00:25:26,563 DEBUG [StoreOpener-8b4f65ad1ec496c8c72c57384036660f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:25:26,563 INFO [StoreOpener-8b4f65ad1ec496c8c72c57384036660f-1 {}] regionserver.HStore(327): Store=8b4f65ad1ec496c8c72c57384036660f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:25:26,564 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 8b4f65ad1ec496c8c72c57384036660f 2024-12-05T00:25:26,564 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f 2024-12-05T00:25:26,565 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f 2024-12-05T00:25:26,565 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 8b4f65ad1ec496c8c72c57384036660f 2024-12-05T00:25:26,565 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 8b4f65ad1ec496c8c72c57384036660f 2024-12-05T00:25:26,567 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 8b4f65ad1ec496c8c72c57384036660f 2024-12-05T00:25:26,570 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T00:25:26,571 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 8b4f65ad1ec496c8c72c57384036660f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=806153, jitterRate=0.025077268481254578}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T00:25:26,571 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8b4f65ad1ec496c8c72c57384036660f 2024-12-05T00:25:26,572 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 8b4f65ad1ec496c8c72c57384036660f: Running coprocessor pre-open hook at 1733358326560Writing region info on filesystem at 1733358326560Initializing all the Stores at 1733358326561 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358326561Cleaning up temporary data from old regions at 1733358326565 (+4 ms)Running coprocessor post-open hooks at 1733358326571 (+6 ms)Region opened successfully at 1733358326571 2024-12-05T00:25:26,573 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f., pid=6, masterSystemTime=1733358326554 2024-12-05T00:25:26,576 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f. 2024-12-05T00:25:26,576 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f. 2024-12-05T00:25:26,577 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8b4f65ad1ec496c8c72c57384036660f, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,37749,1733358324931 2024-12-05T00:25:26,580 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8b4f65ad1ec496c8c72c57384036660f, server=2113c16e5528,37749,1733358324931 because future has completed 2024-12-05T00:25:26,585 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-05T00:25:26,585 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 8b4f65ad1ec496c8c72c57384036660f, server=2113c16e5528,37749,1733358324931 in 181 msec 2024-12-05T00:25:26,588 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-05T00:25:26,588 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8b4f65ad1ec496c8c72c57384036660f, ASSIGN in 344 msec 2024-12-05T00:25:26,590 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T00:25:26,590 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733358326590"}]},"ts":"1733358326590"} 2024-12-05T00:25:26,593 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-12-05T00:25:26,594 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T00:25:26,597 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 387 msec 2024-12-05T00:25:30,851 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-05T00:25:30,854 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:25:30,875 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:25:30,878 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:25:30,879 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:25:31,372 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-12-05T00:25:34,743 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-05T00:25:34,744 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-05T00:25:34,744 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-05T00:25:34,744 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-12-05T00:25:34,745 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T00:25:34,745 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-05T00:25:36,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43963 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T00:25:36,222 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-12-05T00:25:36,222 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-12-05T00:25:36,226 DEBUG [Time-limited test {}] 
hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-12-05T00:25:36,226 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f. 2024-12-05T00:25:36,242 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:25:36,246 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:25:36,246 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:25:36,246 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:25:36,247 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-05T00:25:36,247 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@36d0b5ff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:25:36,247 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@51f59516{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:25:36,362 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3c5438f9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/java.io.tmpdir/jetty-localhost-41441-hadoop-hdfs-3_4_1-tests_jar-_-any-17925318978216961591/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:25:36,363 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2e7a8425{HTTP/1.1, (http/1.1)}{localhost:41441} 2024-12-05T00:25:36,363 INFO [Time-limited test {}] server.Server(415): Started @116990ms 2024-12-05T00:25:36,364 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T00:25:36,401 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:25:36,405 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:25:36,406 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:25:36,406 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:25:36,406 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-05T00:25:36,407 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@712f5f14{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:25:36,407 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3afd309b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:25:36,470 WARN [Thread-829 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data6/current/BP-1914787219-172.17.0.2-1733358323877/current, will proceed with Du for space computation calculation, 2024-12-05T00:25:36,470 WARN [Thread-828 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data5/current/BP-1914787219-172.17.0.2-1733358323877/current, will proceed with Du for space computation calculation, 2024-12-05T00:25:36,489 WARN [Thread-808 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-05T00:25:36,491 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa5d9857bed9d7b2b with lease ID 0x6f798abb99ec6dd4: Processing first storage report for DS-e0ccde86-205f-4834-b07f-b6930e81338d from datanode DatanodeRegistration(127.0.0.1:38197, datanodeUuid=675b84f7-6e45-4dbb-ac2e-cec9da575f5c, infoPort=38813, infoSecurePort=0, ipcPort=39191, storageInfo=lv=-57;cid=testClusterID;nsid=355484051;c=1733358323877) 2024-12-05T00:25:36,491 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa5d9857bed9d7b2b with lease ID 0x6f798abb99ec6dd4: from storage DS-e0ccde86-205f-4834-b07f-b6930e81338d node DatanodeRegistration(127.0.0.1:38197, datanodeUuid=675b84f7-6e45-4dbb-ac2e-cec9da575f5c, infoPort=38813, infoSecurePort=0, ipcPort=39191, storageInfo=lv=-57;cid=testClusterID;nsid=355484051;c=1733358323877), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:25:36,491 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa5d9857bed9d7b2b with lease ID 0x6f798abb99ec6dd4: Processing first storage report for DS-63b69c75-e936-48b3-af8e-1c45ad8f5dea from datanode DatanodeRegistration(127.0.0.1:38197, datanodeUuid=675b84f7-6e45-4dbb-ac2e-cec9da575f5c, infoPort=38813, infoSecurePort=0, ipcPort=39191, storageInfo=lv=-57;cid=testClusterID;nsid=355484051;c=1733358323877) 2024-12-05T00:25:36,491 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa5d9857bed9d7b2b with lease ID 0x6f798abb99ec6dd4: from storage DS-63b69c75-e936-48b3-af8e-1c45ad8f5dea node DatanodeRegistration(127.0.0.1:38197, datanodeUuid=675b84f7-6e45-4dbb-ac2e-cec9da575f5c, infoPort=38813, infoSecurePort=0, ipcPort=39191, storageInfo=lv=-57;cid=testClusterID;nsid=355484051;c=1733358323877), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:25:36,526 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@39835cdd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/java.io.tmpdir/jetty-localhost-45471-hadoop-hdfs-3_4_1-tests_jar-_-any-3245640284674037907/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:25:36,527 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@12b19204{HTTP/1.1, (http/1.1)}{localhost:45471} 2024-12-05T00:25:36,527 INFO [Time-limited test {}] server.Server(415): Started @117154ms 2024-12-05T00:25:36,528 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T00:25:36,571 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:25:36,574 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:25:36,577 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:25:36,577 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:25:36,577 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T00:25:36,578 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68004957{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:25:36,578 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78e445ac{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:25:36,647 WARN [Thread-863 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data7/current/BP-1914787219-172.17.0.2-1733358323877/current, will proceed with Du for space computation calculation, 2024-12-05T00:25:36,650 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data8/current/BP-1914787219-172.17.0.2-1733358323877/current, will proceed with Du for space computation calculation, 2024-12-05T00:25:36,675 WARN [Thread-843 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-05T00:25:36,678 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x42274e413e958971 with lease ID 0x6f798abb99ec6dd5: Processing first storage report for DS-cf54d914-b4bf-4dee-84bc-66450ddb912c from datanode DatanodeRegistration(127.0.0.1:33065, datanodeUuid=b26ba40c-c3cb-422c-ad53-ec8d0852013b, infoPort=35713, infoSecurePort=0, ipcPort=43423, storageInfo=lv=-57;cid=testClusterID;nsid=355484051;c=1733358323877) 2024-12-05T00:25:36,678 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x42274e413e958971 with lease ID 0x6f798abb99ec6dd5: from storage DS-cf54d914-b4bf-4dee-84bc-66450ddb912c node DatanodeRegistration(127.0.0.1:33065, datanodeUuid=b26ba40c-c3cb-422c-ad53-ec8d0852013b, infoPort=35713, infoSecurePort=0, ipcPort=43423, storageInfo=lv=-57;cid=testClusterID;nsid=355484051;c=1733358323877), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:25:36,678 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x42274e413e958971 with lease ID 0x6f798abb99ec6dd5: Processing first storage report for DS-c04f67ce-67cc-4f2c-9f0c-96b0d1d1dc57 from datanode DatanodeRegistration(127.0.0.1:33065, datanodeUuid=b26ba40c-c3cb-422c-ad53-ec8d0852013b, infoPort=35713, infoSecurePort=0, ipcPort=43423, storageInfo=lv=-57;cid=testClusterID;nsid=355484051;c=1733358323877) 2024-12-05T00:25:36,678 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x42274e413e958971 with lease ID 0x6f798abb99ec6dd5: from storage DS-c04f67ce-67cc-4f2c-9f0c-96b0d1d1dc57 node DatanodeRegistration(127.0.0.1:33065, datanodeUuid=b26ba40c-c3cb-422c-ad53-ec8d0852013b, infoPort=35713, infoSecurePort=0, ipcPort=43423, storageInfo=lv=-57;cid=testClusterID;nsid=355484051;c=1733358323877), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:25:36,707 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5ea37f0d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/java.io.tmpdir/jetty-localhost-38525-hadoop-hdfs-3_4_1-tests_jar-_-any-6955722964641612418/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:25:36,707 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@15010086{HTTP/1.1, (http/1.1)}{localhost:38525} 2024-12-05T00:25:36,707 INFO [Time-limited test {}] server.Server(415): Started @117334ms 2024-12-05T00:25:36,708 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-05T00:25:36,817 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data9/current/BP-1914787219-172.17.0.2-1733358323877/current, will proceed with Du for space computation calculation, 2024-12-05T00:25:36,817 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data10/current/BP-1914787219-172.17.0.2-1733358323877/current, will proceed with Du for space computation calculation, 2024-12-05T00:25:36,835 WARN [Thread-878 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-05T00:25:36,838 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x63eeb9a403cb978e with lease ID 0x6f798abb99ec6dd6: Processing first storage report for DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e from datanode DatanodeRegistration(127.0.0.1:44281, datanodeUuid=a937346a-8409-4f02-aafe-ab72fad9c7ae, infoPort=37035, infoSecurePort=0, ipcPort=46059, storageInfo=lv=-57;cid=testClusterID;nsid=355484051;c=1733358323877) 2024-12-05T00:25:36,838 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x63eeb9a403cb978e with lease ID 0x6f798abb99ec6dd6: from storage DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e node DatanodeRegistration(127.0.0.1:44281, datanodeUuid=a937346a-8409-4f02-aafe-ab72fad9c7ae, infoPort=37035, infoSecurePort=0, ipcPort=46059, storageInfo=lv=-57;cid=testClusterID;nsid=355484051;c=1733358323877), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:25:36,838 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x63eeb9a403cb978e with lease ID 0x6f798abb99ec6dd6: Processing first storage report for DS-7841581b-131d-42bf-86cf-a948dc162809 from datanode DatanodeRegistration(127.0.0.1:44281, datanodeUuid=a937346a-8409-4f02-aafe-ab72fad9c7ae, infoPort=37035, infoSecurePort=0, ipcPort=46059, storageInfo=lv=-57;cid=testClusterID;nsid=355484051;c=1733358323877) 2024-12-05T00:25:36,838 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x63eeb9a403cb978e with lease ID 0x6f798abb99ec6dd6: from storage DS-7841581b-131d-42bf-86cf-a948dc162809 node DatanodeRegistration(127.0.0.1:44281, datanodeUuid=a937346a-8409-4f02-aafe-ab72fad9c7ae, infoPort=37035, infoSecurePort=0, ipcPort=46059, storageInfo=lv=-57;cid=testClusterID;nsid=355484051;c=1733358323877), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:25:36,930 WARN [ResponseProcessor for block BP-1914787219-172.17.0.2-1733358323877:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1914787219-172.17.0.2-1733358323877:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:36,930 WARN [ResponseProcessor for block BP-1914787219-172.17.0.2-1733358323877:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1914787219-172.17.0.2-1733358323877:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:36,930 WARN [ResponseProcessor for block BP-1914787219-172.17.0.2-1733358323877:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1914787219-172.17.0.2-1733358323877:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:36,930 WARN [ResponseProcessor for block BP-1914787219-172.17.0.2-1733358323877:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1914787219-172.17.0.2-1733358323877:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1914787219-172.17.0.2-1733358323877:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:36,930 WARN [DataStreamer for file /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358325568 block BP-1914787219-172.17.0.2-1733358323877:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK], DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK]) is bad. 2024-12-05T00:25:36,930 WARN [DataStreamer for file /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 block BP-1914787219-172.17.0.2-1733358323877:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK], DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK]) is bad. 
2024-12-05T00:25:36,930 WARN [DataStreamer for file /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta block BP-1914787219-172.17.0.2-1733358323877:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK]) is bad. 2024-12-05T00:25:36,930 WARN [DataStreamer for file /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/WALs/2113c16e5528,43963,1733358324824/2113c16e5528%2C43963%2C1733358324824.1733358325078 block BP-1914787219-172.17.0.2-1733358323877:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK], DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK]) is bad. 2024-12-05T00:25:36,931 WARN [PacketResponder: BP-1914787219-172.17.0.2-1733358323877:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:34943] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-05T00:25:36,932 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-377644347_22 at /127.0.0.1:33878 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34943:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33878 dst: /127.0.0.1:34943 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:25:36,932 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:33910 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34943:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33910 dst: /127.0.0.1:34943 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:25:36,932 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_453251423_22 at /127.0.0.1:37966 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:35205:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37966 dst: /127.0.0.1:35205 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-05T00:25:36,932 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_453251423_22 at /127.0.0.1:33936 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:34943:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33936 dst: /127.0.0.1:34943 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:25:36,932 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:33912 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34943:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33912 dst: /127.0.0.1:34943 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:25:36,932 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:37930 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35205:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37930 dst: /127.0.0.1:35205 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:25:36,933 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@21d5e4af{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:25:36,933 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-377644347_22 at /127.0.0.1:37886 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35205:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37886 dst: /127.0.0.1:35205 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:25:36,933 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:37922 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35205:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37922 dst: /127.0.0.1:35205 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:25:36,933 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@145c0180{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:25:36,934 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:25:36,934 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7720beab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:25:36,934 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69bbaec1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/hadoop.log.dir/,STOPPED} 2024-12-05T00:25:36,935 WARN [BP-1914787219-172.17.0.2-1733358323877 heartbeating to localhost/127.0.0.1:41383 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T00:25:36,935 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-05T00:25:36,935 WARN [BP-1914787219-172.17.0.2-1733358323877 heartbeating to localhost/127.0.0.1:41383 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1914787219-172.17.0.2-1733358323877 (Datanode Uuid 62e5d8db-0dd5-4ee1-910d-f64b93970567) service to localhost/127.0.0.1:41383 2024-12-05T00:25:36,935 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T00:25:36,935 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data3/current/BP-1914787219-172.17.0.2-1733358323877 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:25:36,936 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data4/current/BP-1914787219-172.17.0.2-1733358323877 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:25:36,936 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T00:25:36,937 WARN [DataStreamer for file /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/WALs/2113c16e5528,43963,1733358324824/2113c16e5528%2C43963%2C1733358324824.1733358325078 block BP-1914787219-172.17.0.2-1733358323877:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.ConnectException: 
Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:36,941 WARN [DataStreamer for file /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 block BP-1914787219-172.17.0.2-1733358323877:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:36,943 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@ab5393f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:25:36,943 WARN [DataStreamer for file /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358325568 block BP-1914787219-172.17.0.2-1733358323877:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:36,943 WARN [DataStreamer for file /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta block BP-1914787219-172.17.0.2-1733358323877:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-05T00:25:36,944 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@47f82e76{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:25:36,944 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:25:36,944 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30e7c448{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:25:36,944 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@257cf4bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/hadoop.log.dir/,STOPPED} 2024-12-05T00:25:36,945 WARN [BP-1914787219-172.17.0.2-1733358323877 heartbeating to localhost/127.0.0.1:41383 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T00:25:36,945 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-05T00:25:36,946 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T00:25:36,946 WARN [BP-1914787219-172.17.0.2-1733358323877 heartbeating to localhost/127.0.0.1:41383 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1914787219-172.17.0.2-1733358323877 (Datanode Uuid 515aad1c-3331-473f-a1b0-e49d806ffab8) service to localhost/127.0.0.1:41383 2024-12-05T00:25:36,946 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data1/current/BP-1914787219-172.17.0.2-1733358323877 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:25:36,946 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data2/current/BP-1914787219-172.17.0.2-1733358323877 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:25:36,947 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T00:25:36,950 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f., hostname=2113c16e5528,37749,1733358324931, seqNum=2] 2024-12-05T00:25:36,952 ERROR [FSHLog-0-hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8-prefix:2113c16e5528,37749,1733358324931 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:36,952 WARN [FSHLog-0-hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8-prefix:2113c16e5528,37749,1733358324931 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:36,952 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-05T00:25:36,952 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 2113c16e5528%2C37749%2C1733358324931:(num 1733358325568) roll requested 2024-12-05T00:25:36,953 INFO [regionserver/2113c16e5528:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C37749%2C1733358324931.1733358336953 2024-12-05T00:25:36,959 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:36,959 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:36,959 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:36,959 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:36,959 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:36,959 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358325568 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358336953 2024-12-05T00:25:36,960 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:36,960 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-05T00:25:36,960 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35713:35713),(127.0.0.1/127.0.0.1:37035:37035)] 2024-12-05T00:25:36,960 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358325568 is not closed yet, will try archiving it next time 2024-12-05T00:25:36,961 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-05T00:25:36,961 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-05T00:25:36,961 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358325568 2024-12-05T00:25:36,964 WARN [IPC Server handler 3 on default port 41383 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358325568 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-12-05T00:25:36,967 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358325568 after 5ms 2024-12-05T00:25:37,051 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:38,172 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-05T00:25:38,961 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:38,962 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358336953 2024-12-05T00:25:38,962 WARN [ResponseProcessor for block BP-1914787219-172.17.0.2-1733358323877:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1914787219-172.17.0.2-1733358323877:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:38,963 WARN [DataStreamer for file /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358336953 block BP-1914787219-172.17.0.2-1733358323877:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK], DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK]) is bad. 2024-12-05T00:25:38,963 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:60390 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:33065:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60390 dst: /127.0.0.1:33065 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:25:38,963 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:57704 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:44281:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57704 dst: /127.0.0.1:44281 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
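The EOFException in the ResponseProcessor and the DataXceiver errors above are the client-side and datanode-side views of the same event: the TestLogRolling test shuts down a datanode that participates in the WAL's write pipeline. A hedged sketch of that kind of fault injection with MiniDFSCluster follows; it is not the actual test code, and the node count and index are placeholders.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DatanodeDeathSketch {
    // Stop one datanode of a MiniDFSCluster so that any stream writing through it
    // loses a pipeline replica and the DFS client has to run error recovery.
    static void killOneDatanode(Configuration conf) throws Exception {
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        try {
            cluster.waitActive();
            // stopDataNode() returns the stopped node's properties so a test
            // could restart the same datanode later.
            MiniDFSCluster.DataNodeProperties stopped = cluster.stopDataNode(0);
            // ... write to a file whose pipeline included that node and observe
            // the "datanode ... is bad" recovery messages seen in this log ...
        } finally {
            cluster.shutdown();
        }
    }
}
```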
2024-12-05T00:25:38,965 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@39835cdd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:25:38,965 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@12b19204{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:25:38,965 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:25:38,965 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3afd309b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:25:38,965 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@712f5f14{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/hadoop.log.dir/,STOPPED} 2024-12-05T00:25:38,967 WARN [BP-1914787219-172.17.0.2-1733358323877 heartbeating to localhost/127.0.0.1:41383 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T00:25:38,967 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-05T00:25:38,967 WARN [BP-1914787219-172.17.0.2-1733358323877 heartbeating to localhost/127.0.0.1:41383 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1914787219-172.17.0.2-1733358323877 (Datanode Uuid b26ba40c-c3cb-422c-ad53-ec8d0852013b) service to localhost/127.0.0.1:41383 2024-12-05T00:25:38,967 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T00:25:38,967 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data7/current/BP-1914787219-172.17.0.2-1733358323877 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:25:38,967 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data8/current/BP-1914787219-172.17.0.2-1733358323877 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:25:38,968 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T00:25:39,051 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:40,172 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:40,961 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:40,962 WARN [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK]] 2024-12-05T00:25:40,962 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 2113c16e5528%2C37749%2C1733358324931:(num 1733358336953) roll requested 2024-12-05T00:25:40,962 INFO [regionserver/2113c16e5528:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C37749%2C1733358324931.1733358340962 2024-12-05T00:25:40,965 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:40,965 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]) is bad. 2024-12-05T00:25:40,966 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741839_1021 2024-12-05T00:25:40,968 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK] 2024-12-05T00:25:40,968 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358325568 after 4007ms 2024-12-05T00:25:40,973 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T00:25:40,976 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:40,976 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:40,976 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:40,976 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:40,976 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:40,976 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358336953 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358340962 2024-12-05T00:25:40,977 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37035:37035),(127.0.0.1/127.0.0.1:38813:38813)] 2024-12-05T00:25:40,977 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358325568 is not closed yet, will try archiving it next time 2024-12-05T00:25:40,977 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358336953 is not closed yet, will try archiving it next time 2024-12-05T00:25:40,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44281 is added to blk_1073741838_1020 (size=3600) 2024-12-05T00:25:41,052 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:41,379 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358325568 is not closed yet, will try archiving it next time 2024-12-05T00:25:42,172 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:42,850 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@46df7b42[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44281, datanodeUuid=a937346a-8409-4f02-aafe-ab72fad9c7ae, infoPort=37035, infoSecurePort=0, ipcPort=46059, storageInfo=lv=-57;cid=testClusterID;nsid=355484051;c=1733358323877):Failed to transfer BP-1914787219-172.17.0.2-1733358323877:blk_1073741838_1020 to 127.0.0.1:34943 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:25:42,976 WARN [ResponseProcessor for block BP-1914787219-172.17.0.2-1733358323877:blk_1073741840_1022 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1914787219-172.17.0.2-1733358323877:blk_1073741840_1022 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:42,977 WARN [DataStreamer for file /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358340962 block BP-1914787219-172.17.0.2-1733358323877:blk_1073741840_1022 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK], DatanodeInfoWithStorage[127.0.0.1:38197,DS-e0ccde86-205f-4834-b07f-b6930e81338d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK]) is bad. 2024-12-05T00:25:42,977 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:41754 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:44281:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41754 dst: /127.0.0.1:44281 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
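After the failed DataTransfer to 127.0.0.1:34943 above, the finalized WAL block is only reachable on the surviving replica. When diagnosing this kind of situation it can help to ask the NameNode where a file's blocks actually live; the sketch below does that with the public FileSystem API (the path argument is a placeholder supplied by the caller).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockLocationProbe {
    // Print the datanode hosts for every block of a file, e.g. a rolled WAL,
    // to confirm which replicas survived a pipeline failure. Diagnostic only.
    static void printBlockHosts(Configuration conf, Path file) throws Exception {
        FileSystem fs = file.getFileSystem(conf);
        long len = fs.getFileStatus(file).getLen();
        for (BlockLocation loc : fs.getFileBlockLocations(file, 0, len)) {
            System.out.println(loc.getOffset() + "-" + (loc.getOffset() + loc.getLength())
                + " on " + String.join(",", loc.getHosts()));
        }
    }
}
```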
2024-12-05T00:25:42,977 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:42,977 WARN [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38197,DS-e0ccde86-205f-4834-b07f-b6930e81338d,DISK]] 2024-12-05T00:25:42,977 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:36438 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:38197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36438 dst: /127.0.0.1:38197 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
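The WARN above ("Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL.") is HBase's low-replication check on the WAL output stream: when the live pipeline of the block being written shrinks below the tolerated minimum, a roll is requested so writes move to a fresh file with a healthy pipeline. The sketch below shows the idea using the public HdfsDataOutputStream API; the method name and threshold are illustrative and this is not FSHLog's actual implementation.

```java
import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;

public class LowReplicationCheck {
    // Return true when the block currently being written has fewer live replicas
    // than the tolerated minimum (e.g. 1 < 2 in the log above), which is the
    // condition that triggers a WAL roll request.
    static boolean shouldRequestRoll(FSDataOutputStream out, int minTolerated) throws IOException {
        if (out instanceof HdfsDataOutputStream) {
            int liveReplicas = ((HdfsDataOutputStream) out).getCurrentBlockReplication();
            return liveReplicas < minTolerated;
        }
        return false; // non-HDFS streams expose no pipeline information
    }
}
```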
2024-12-05T00:25:42,978 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 2113c16e5528%2C37749%2C1733358324931:(num 1733358340962) roll requested 2024-12-05T00:25:42,978 INFO [regionserver/2113c16e5528:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C37749%2C1733358324931.1733358342978 2024-12-05T00:25:42,979 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5ea37f0d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:25:42,979 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@15010086{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:25:42,980 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:25:42,980 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78e445ac{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:25:42,980 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68004957{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/hadoop.log.dir/,STOPPED} 2024-12-05T00:25:42,981 WARN [Thread-918 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1024 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:42,981 WARN [BP-1914787219-172.17.0.2-1733358323877 heartbeating to localhost/127.0.0.1:41383 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T00:25:42,981 WARN [Thread-918 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741841_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK]) is bad. 2024-12-05T00:25:42,981 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
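Here the log roller reacts to the pipeline error by requesting yet another roll (1733358340962 to 1733358342978) while a further datanode is being shut down. Rolls can also be requested from outside the regionserver; a hedged sketch using the HBase Admin API follows, with the connection details and server name as placeholders modeled on the "host,port,startcode" form seen in this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ManualWalRoll {
    // Ask a specific regionserver to roll its WAL, the same operation the
    // logRoller thread performs internally when it detects a bad pipeline.
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Placeholder server name in "host,port,startcode" form.
            ServerName rs = ServerName.valueOf("2113c16e5528,37749,1733358324931");
            admin.rollWALWriter(rs);
        }
    }
}
```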
2024-12-05T00:25:42,981 WARN [Thread-918 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741841_1024 2024-12-05T00:25:42,981 WARN [BP-1914787219-172.17.0.2-1733358323877 heartbeating to localhost/127.0.0.1:41383 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1914787219-172.17.0.2-1733358323877 (Datanode Uuid a937346a-8409-4f02-aafe-ab72fad9c7ae) service to localhost/127.0.0.1:41383 2024-12-05T00:25:42,981 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T00:25:42,982 WARN [Thread-918 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK] 2024-12-05T00:25:42,982 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data9/current/BP-1914787219-172.17.0.2-1733358323877 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:25:42,982 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data10/current/BP-1914787219-172.17.0.2-1733358323877 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:25:42,982 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T00:25:42,987 WARN [Thread-918 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34943 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:42,987 WARN [Thread-918 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38197,DS-e0ccde86-205f-4834-b07f-b6930e81338d,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK]) is bad. 
2024-12-05T00:25:42,987 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:36464 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741842_1025] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data6]'}, localName='127.0.0.1:38197', datanodeUuid='675b84f7-6e45-4dbb-ac2e-cec9da575f5c', xmitsInProgress=0}:Exception transferring block BP-1914787219-172.17.0.2-1733358323877:blk_1073741842_1025 to mirror 127.0.0.1:34943 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:25:42,987 WARN [Thread-918 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741842_1025 2024-12-05T00:25:42,987 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:36464 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741842_1025] {}] datanode.BlockReceiver(316): Block 1073741842 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-05T00:25:42,988 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:36464 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741842_1025] {}] datanode.DataXceiver(331): 127.0.0.1:38197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36464 dst: /127.0.0.1:38197 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:25:42,988 WARN [Thread-918 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK] 2024-12-05T00:25:42,989 WARN [Thread-918 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:42,989 WARN [Thread-918 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK], DatanodeInfoWithStorage[127.0.0.1:38197,DS-e0ccde86-205f-4834-b07f-b6930e81338d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]) is bad. 2024-12-05T00:25:42,989 WARN [Thread-918 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741843_1026 2024-12-05T00:25:42,990 WARN [Thread-918 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK] 2024-12-05T00:25:42,991 WARN [Thread-918 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-05T00:25:42,991 WARN [Thread-918 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK], DatanodeInfoWithStorage[127.0.0.1:38197,DS-e0ccde86-205f-4834-b07f-b6930e81338d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK]) is bad. 2024-12-05T00:25:42,991 WARN [Thread-918 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741844_1027 2024-12-05T00:25:42,992 WARN [Thread-918 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK] 2024-12-05T00:25:42,992 WARN [IPC Server handler 1 on default port 41383 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-05T00:25:42,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37749 {}] regionserver.HRegion(8855): Flush requested on 8b4f65ad1ec496c8c72c57384036660f 2024-12-05T00:25:42,993 WARN [IPC Server handler 1 on default port 41383 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-05T00:25:42,993 WARN [IPC Server handler 1 on default port 41383 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-05T00:25:42,993 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8b4f65ad1ec496c8c72c57384036660f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-05T00:25:42,995 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:42,995 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:42,996 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:42,996 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:42,996 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:42,996 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358340962 with entries=6, filesize=6.11 KB; new WAL /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358342978 2024-12-05T00:25:42,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38197 is added to blk_1073741840_1023 
(size=6261) 2024-12-05T00:25:43,001 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38813:38813)] 2024-12-05T00:25:43,001 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358325568 is not closed yet, will try archiving it next time 2024-12-05T00:25:43,001 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358340962 is not closed yet, will try archiving it next time 2024-12-05T00:25:43,014 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/.tmp/info/1e93e6fff97b43198e72b93f061ce025 is 1080, key is row0002/info:/1733358338969/Put/seqid=0 2024-12-05T00:25:43,018 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35205 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:43,017 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:36468 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data6]'}, localName='127.0.0.1:38197', datanodeUuid='675b84f7-6e45-4dbb-ac2e-cec9da575f5c', xmitsInProgress=0}:Exception transferring block BP-1914787219-172.17.0.2-1733358323877:blk_1073741846_1029 to mirror 127.0.0.1:35205 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:25:43,018 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38197,DS-e0ccde86-205f-4834-b07f-b6930e81338d,DISK], DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]) is bad. 2024-12-05T00:25:43,018 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741846_1029 2024-12-05T00:25:43,018 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:36468 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-05T00:25:43,018 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:36468 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:38197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36468 dst: /127.0.0.1:38197 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:25:43,019 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK] 2024-12-05T00:25:43,020 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:43,021 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK], DatanodeInfoWithStorage[127.0.0.1:38197,DS-e0ccde86-205f-4834-b07f-b6930e81338d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK]) is bad. 2024-12-05T00:25:43,021 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741847_1030 2024-12-05T00:25:43,021 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK] 2024-12-05T00:25:43,023 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:36474 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741848_1031] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data6]'}, localName='127.0.0.1:38197', datanodeUuid='675b84f7-6e45-4dbb-ac2e-cec9da575f5c', xmitsInProgress=0}:Exception transferring block BP-1914787219-172.17.0.2-1733358323877:blk_1073741848_1031 to mirror 127.0.0.1:44281 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:25:43,023 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44281 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:43,024 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:36474 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741848_1031] {}] datanode.BlockReceiver(316): Block 1073741848 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-05T00:25:43,024 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38197,DS-e0ccde86-205f-4834-b07f-b6930e81338d,DISK], DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK]) is bad. 2024-12-05T00:25:43,024 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741848_1031 2024-12-05T00:25:43,024 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:36474 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741848_1031] {}] datanode.DataXceiver(331): 127.0.0.1:38197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36474 dst: /127.0.0.1:38197 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:25:43,024 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK] 2024-12-05T00:25:43,025 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:43,026 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK], DatanodeInfoWithStorage[127.0.0.1:38197,DS-e0ccde86-205f-4834-b07f-b6930e81338d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK]) is bad. 2024-12-05T00:25:43,026 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741849_1032 2024-12-05T00:25:43,026 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK] 2024-12-05T00:25:43,027 WARN [IPC Server handler 0 on default port 41383 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-05T00:25:43,027 WARN [IPC Server handler 0 on default port 41383 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-05T00:25:43,027 WARN [IPC Server handler 0 on default port 41383 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-05T00:25:43,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38197 is added to blk_1073741850_1033 (size=10347) 2024-12-05T00:25:43,052 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:43,399 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358325568 is not closed yet, will try archiving it next time 2024-12-05T00:25:43,431 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/.tmp/info/1e93e6fff97b43198e72b93f061ce025 2024-12-05T00:25:43,438 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/.tmp/info/1e93e6fff97b43198e72b93f061ce025 as hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/1e93e6fff97b43198e72b93f061ce025 2024-12-05T00:25:43,444 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/1e93e6fff97b43198e72b93f061ce025, entries=5, sequenceid=11, filesize=10.1 K 2024-12-05T00:25:43,445 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 8b4f65ad1ec496c8c72c57384036660f in 452ms, sequenceid=11, compaction requested=false 2024-12-05T00:25:43,445 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8b4f65ad1ec496c8c72c57384036660f: 2024-12-05T00:25:43,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37749 {}] regionserver.HRegion(8855): Flush requested on 8b4f65ad1ec496c8c72c57384036660f 2024-12-05T00:25:43,619 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8b4f65ad1ec496c8c72c57384036660f 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-12-05T00:25:43,623 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/.tmp/info/329e30068fe2419b8a20b0a3c7086f73 is 1080, key is row0007/info:/1733358342994/Put/seqid=0 2024-12-05T00:25:43,625 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:43,625 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK]) is bad. 2024-12-05T00:25:43,625 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741851_1034 2024-12-05T00:25:43,626 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK] 2024-12-05T00:25:43,628 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33065 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:43,628 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:36514 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741852_1035] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data6]'}, localName='127.0.0.1:38197', datanodeUuid='675b84f7-6e45-4dbb-ac2e-cec9da575f5c', xmitsInProgress=0}:Exception transferring block BP-1914787219-172.17.0.2-1733358323877:blk_1073741852_1035 to mirror 127.0.0.1:33065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:25:43,628 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38197,DS-e0ccde86-205f-4834-b07f-b6930e81338d,DISK], DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK]) is bad. 2024-12-05T00:25:43,628 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741852_1035 2024-12-05T00:25:43,628 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:36514 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741852_1035] {}] datanode.BlockReceiver(316): Block 1073741852 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-05T00:25:43,628 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:36514 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741852_1035] {}] datanode.DataXceiver(331): 127.0.0.1:38197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36514 dst: /127.0.0.1:38197 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:25:43,629 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK] 2024-12-05T00:25:43,630 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:43,631 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]) is bad. 2024-12-05T00:25:43,631 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741853_1036 2024-12-05T00:25:43,631 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK] 2024-12-05T00:25:43,633 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34943 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:43,633 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:36524 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741854_1037] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data6]'}, localName='127.0.0.1:38197', datanodeUuid='675b84f7-6e45-4dbb-ac2e-cec9da575f5c', xmitsInProgress=0}:Exception transferring block BP-1914787219-172.17.0.2-1733358323877:blk_1073741854_1037 to mirror 127.0.0.1:34943 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:25:43,633 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38197,DS-e0ccde86-205f-4834-b07f-b6930e81338d,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK]) is bad. 2024-12-05T00:25:43,633 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741854_1037 2024-12-05T00:25:43,633 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:36524 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741854_1037] {}] datanode.BlockReceiver(316): Block 1073741854 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-05T00:25:43,633 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:36524 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741854_1037] {}] datanode.DataXceiver(331): 127.0.0.1:38197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36524 dst: /127.0.0.1:38197 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-05T00:25:43,634 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK] 2024-12-05T00:25:43,634 WARN [IPC Server handler 3 on default port 41383 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-05T00:25:43,635 WARN [IPC Server handler 3 on default port 41383 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-05T00:25:43,635 WARN [IPC Server handler 3 on default port 41383 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-05T00:25:43,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38197 is added to blk_1073741855_1038 (size=12506) 2024-12-05T00:25:44,038 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/.tmp/info/329e30068fe2419b8a20b0a3c7086f73 2024-12-05T00:25:44,045 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/.tmp/info/329e30068fe2419b8a20b0a3c7086f73 as hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/329e30068fe2419b8a20b0a3c7086f73 2024-12-05T00:25:44,051 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/329e30068fe2419b8a20b0a3c7086f73, entries=7, sequenceid=24, filesize=12.2 K 2024-12-05T00:25:44,052 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 8b4f65ad1ec496c8c72c57384036660f in 433ms, sequenceid=24, compaction requested=false 2024-12-05T00:25:44,052 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8b4f65ad1ec496c8c72c57384036660f: 2024-12-05T00:25:44,052 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should 
split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-12-05T00:25:44,052 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:25:44,052 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/329e30068fe2419b8a20b0a3c7086f73 because midkey is the same as first or last row 2024-12-05T00:25:44,173 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:45,001 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:45,001 WARN [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38197,DS-e0ccde86-205f-4834-b07f-b6930e81338d,DISK]] 2024-12-05T00:25:45,001 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 2113c16e5528%2C37749%2C1733358324931:(num 1733358342978) roll requested 2024-12-05T00:25:45,002 INFO [regionserver/2113c16e5528:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C37749%2C1733358324931.1733358345001 2024-12-05T00:25:45,004 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:45,005 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK], DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]) is bad. 2024-12-05T00:25:45,005 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741856_1039 2024-12-05T00:25:45,005 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK] 2024-12-05T00:25:45,006 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:45,007 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK], DatanodeInfoWithStorage[127.0.0.1:38197,DS-e0ccde86-205f-4834-b07f-b6930e81338d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK]) is bad. 2024-12-05T00:25:45,007 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741857_1040 2024-12-05T00:25:45,007 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK] 2024-12-05T00:25:45,008 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:45,008 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK]) is bad. 2024-12-05T00:25:45,008 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741858_1041 2024-12-05T00:25:45,009 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK] 2024-12-05T00:25:45,011 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:36538 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data6]'}, localName='127.0.0.1:38197', datanodeUuid='675b84f7-6e45-4dbb-ac2e-cec9da575f5c', xmitsInProgress=0}:Exception transferring block BP-1914787219-172.17.0.2-1733358323877:blk_1073741859_1042 to mirror 127.0.0.1:34943 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-05T00:25:45,011 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34943 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:45,011 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38197,DS-e0ccde86-205f-4834-b07f-b6930e81338d,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK]) is bad. 2024-12-05T00:25:45,011 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:36538 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-05T00:25:45,011 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741859_1042 2024-12-05T00:25:45,011 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:36538 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:38197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36538 dst: /127.0.0.1:38197 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-05T00:25:45,012 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK] 2024-12-05T00:25:45,012 WARN [IPC Server handler 1 on default port 41383 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-05T00:25:45,012 WARN [IPC Server handler 1 on default port 41383 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-05T00:25:45,012 WARN [IPC Server handler 1 on default port 41383 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-05T00:25:45,015 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:45,015 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:45,015 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:45,015 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:45,015 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:45,015 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358342978 with entries=18, filesize=18.21 KB; new WAL /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358345001 2024-12-05T00:25:45,016 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38813:38813)] 2024-12-05T00:25:45,016 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358325568 is not closed yet, will try archiving it next time 2024-12-05T00:25:45,016 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358342978 is not closed yet, will try archiving it next time 2024-12-05T00:25:45,017 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358336953 to 
hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/oldWALs/2113c16e5528%2C37749%2C1733358324931.1733358336953 2024-12-05T00:25:45,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38197 is added to blk_1073741845_1028 (size=18655) 2024-12-05T00:25:45,018 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358340962 to hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/oldWALs/2113c16e5528%2C37749%2C1733358324931.1733358340962 2024-12-05T00:25:45,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37749 {}] regionserver.HRegion(8855): Flush requested on 8b4f65ad1ec496c8c72c57384036660f 2024-12-05T00:25:45,039 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8b4f65ad1ec496c8c72c57384036660f 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-05T00:25:45,045 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/.tmp/info/6618bd3565c54b7a8b97a2d259c6df69 is 1079, key is tmprow/info:/1733358345038/Put/seqid=0 2024-12-05T00:25:45,047 WARN [Thread-942 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:45,048 WARN [Thread-942 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK], DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK]) is bad. 2024-12-05T00:25:45,048 WARN [Thread-942 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741861_1044 2024-12-05T00:25:45,048 WARN [Thread-942 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK] 2024-12-05T00:25:45,050 WARN [Thread-942 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:45,050 WARN [Thread-942 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK], DatanodeInfoWithStorage[127.0.0.1:38197,DS-e0ccde86-205f-4834-b07f-b6930e81338d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK]) is bad. 2024-12-05T00:25:45,050 WARN [Thread-942 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741862_1045 2024-12-05T00:25:45,050 WARN [Thread-942 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK] 2024-12-05T00:25:45,052 WARN [Thread-942 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:45,052 WARN [Thread-942 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK], DatanodeInfoWithStorage[127.0.0.1:38197,DS-e0ccde86-205f-4834-b07f-b6930e81338d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK]) is bad. 2024-12-05T00:25:45,052 WARN [Thread-942 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741863_1046 2024-12-05T00:25:45,052 WARN [Thread-942 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK] 2024-12-05T00:25:45,052 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:45,053 WARN [Thread-942 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:45,054 WARN [Thread-942 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK], DatanodeInfoWithStorage[127.0.0.1:38197,DS-e0ccde86-205f-4834-b07f-b6930e81338d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]) is bad. 
2024-12-05T00:25:45,054 WARN [Thread-942 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741864_1047 2024-12-05T00:25:45,054 WARN [Thread-942 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK] 2024-12-05T00:25:45,055 WARN [IPC Server handler 3 on default port 41383 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-05T00:25:45,055 WARN [IPC Server handler 3 on default port 41383 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-05T00:25:45,055 WARN [IPC Server handler 3 on default port 41383 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-05T00:25:45,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38197 is added to blk_1073741865_1048 (size=6027) 2024-12-05T00:25:45,418 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358325568 is not closed yet, will try archiving it next time 2024-12-05T00:25:45,459 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/.tmp/info/6618bd3565c54b7a8b97a2d259c6df69 2024-12-05T00:25:45,465 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/.tmp/info/6618bd3565c54b7a8b97a2d259c6df69 as hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/6618bd3565c54b7a8b97a2d259c6df69 2024-12-05T00:25:45,472 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/6618bd3565c54b7a8b97a2d259c6df69, entries=1, sequenceid=34, filesize=5.9 K 2024-12-05T00:25:45,473 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 8b4f65ad1ec496c8c72c57384036660f in 434ms, sequenceid=34, compaction requested=true 2024-12-05T00:25:45,473 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8b4f65ad1ec496c8c72c57384036660f: 2024-12-05T00:25:45,473 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-12-05T00:25:45,473 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:25:45,473 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/329e30068fe2419b8a20b0a3c7086f73 because midkey is the same as first or last row 2024-12-05T00:25:45,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8b4f65ad1ec496c8c72c57384036660f:info, priority=-2147483648, current under compaction store size is 1 2024-12-05T00:25:45,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-05T00:25:45,474 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-05T00:25:45,475 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-05T00:25:45,475 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.HStore(1541): 8b4f65ad1ec496c8c72c57384036660f/info is initiating minor compaction (all files) 2024-12-05T00:25:45,475 INFO [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8b4f65ad1ec496c8c72c57384036660f/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f. 
2024-12-05T00:25:45,475 INFO [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/1e93e6fff97b43198e72b93f061ce025, hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/329e30068fe2419b8a20b0a3c7086f73, hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/6618bd3565c54b7a8b97a2d259c6df69] into tmpdir=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/.tmp, totalSize=28.2 K 2024-12-05T00:25:45,476 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1e93e6fff97b43198e72b93f061ce025, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733358338969 2024-12-05T00:25:45,476 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] compactions.Compactor(225): Compacting 329e30068fe2419b8a20b0a3c7086f73, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1733358342994 2024-12-05T00:25:45,476 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6618bd3565c54b7a8b97a2d259c6df69, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733358345038 2024-12-05T00:25:45,489 INFO [RS:0;2113c16e5528:37749-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8b4f65ad1ec496c8c72c57384036660f#info#compaction#21 average throughput is 4.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-05T00:25:45,489 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/.tmp/info/d7bab672cc8f4616ab39fbc10f45d899 is 1080, key is row0002/info:/1733358338969/Put/seqid=0 2024-12-05T00:25:45,491 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-05T00:25:45,491 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK], DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK]) is bad. 2024-12-05T00:25:45,491 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741866_1049 2024-12-05T00:25:45,491 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK] 2024-12-05T00:25:45,492 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:45,493 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK], DatanodeInfoWithStorage[127.0.0.1:38197,DS-e0ccde86-205f-4834-b07f-b6930e81338d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK]) is bad. 2024-12-05T00:25:45,493 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741867_1050 2024-12-05T00:25:45,493 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6dbd1900[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38197, datanodeUuid=675b84f7-6e45-4dbb-ac2e-cec9da575f5c, infoPort=38813, infoSecurePort=0, ipcPort=39191, storageInfo=lv=-57;cid=testClusterID;nsid=355484051;c=1733358323877):Failed to transfer BP-1914787219-172.17.0.2-1733358323877:blk_1073741850_1033 to 127.0.0.1:44281 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:25:45,493 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@637afb1f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38197, datanodeUuid=675b84f7-6e45-4dbb-ac2e-cec9da575f5c, infoPort=38813, infoSecurePort=0, ipcPort=39191, storageInfo=lv=-57;cid=testClusterID;nsid=355484051;c=1733358323877):Failed to transfer BP-1914787219-172.17.0.2-1733358323877:blk_1073741840_1023 to 127.0.0.1:44281 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:25:45,493 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK] 2024-12-05T00:25:45,494 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:45,495 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK], DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK]) is bad. 
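The repeating WARN pattern above, "Exception in createBlockOutputStream", then "datanode 0(...) is bad", "Abandoning blk_...", "Excluding datanode ...", is the client-side recovery loop for a write pipeline that cannot be set up: the unreachable first node is excluded and a fresh block is requested. The following simplified sketch captures that loop under those assumptions; the allocator and dialer interfaces are hypothetical stand-ins, not the real DataStreamer API.

```java
import java.net.ConnectException;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

/** Simplified sketch of pipeline-setup recovery; not the real DataStreamer. */
public class PipelineRecoverySketch {

    interface BlockAllocator {
        /** Ask the "NameNode" for a new block and pipeline, avoiding the excluded nodes. */
        List<String> allocatePipeline(Set<String> excluded) throws ConnectException;
    }

    interface NodeDialer {
        /** Try to open a stream to the first node of the pipeline. */
        void connect(String node) throws ConnectException;
    }

    static List<String> setUpPipeline(BlockAllocator allocator, NodeDialer dialer, int maxAttempts)
            throws ConnectException {
        Set<String> excluded = new HashSet<>();
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            List<String> pipeline = allocator.allocatePipeline(excluded);
            try {
                dialer.connect(pipeline.get(0));   // the first node relays to the rest
                return pipeline;                   // pipeline established
            } catch (ConnectException e) {
                // Mirror the log: mark datanode 0 bad, abandon the block, exclude the node.
                System.out.println("datanode 0(" + pipeline.get(0) + ") is bad; abandoning block");
                excluded.add(pipeline.get(0));
            }
        }
        throw new ConnectException("could not build a pipeline after " + maxAttempts + " attempts");
    }
}
```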
2024-12-05T00:25:45,495 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741868_1051 2024-12-05T00:25:45,495 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK] 2024-12-05T00:25:45,496 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:45,496 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK], DatanodeInfoWithStorage[127.0.0.1:38197,DS-e0ccde86-205f-4834-b07f-b6930e81338d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]) is bad. 
2024-12-05T00:25:45,496 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741869_1052 2024-12-05T00:25:45,497 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK] 2024-12-05T00:25:45,497 WARN [IPC Server handler 0 on default port 41383 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-05T00:25:45,497 WARN [IPC Server handler 0 on default port 41383 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-05T00:25:45,497 WARN [IPC Server handler 0 on default port 41383 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-05T00:25:45,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38197 is added to blk_1073741870_1053 (size=17994) 2024-12-05T00:25:45,907 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/.tmp/info/d7bab672cc8f4616ab39fbc10f45d899 as hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/d7bab672cc8f4616ab39fbc10f45d899 2024-12-05T00:25:45,914 INFO [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8b4f65ad1ec496c8c72c57384036660f/info of 8b4f65ad1ec496c8c72c57384036660f into d7bab672cc8f4616ab39fbc10f45d899(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
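The NameNode-side warnings in the entries above ("Failed to place enough replicas, still in need of 1 to reach 2") follow from simple arithmetic: replication is 2, but after the excluded and dead nodes are removed only one live DISK storage remains to choose from. A small sketch of that bookkeeping is shown below; it is illustrative only, not BlockPlacementPolicyDefault.

```java
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

/** Illustrative replica-placement shortfall check; not BlockPlacementPolicyDefault. */
public class ReplicaPlacementSketch {

    static List<String> choose(List<String> liveDiskNodes, Set<String> excluded, int replication) {
        List<String> chosen = liveDiskNodes.stream()
            .filter(n -> !excluded.contains(n))
            .limit(replication)
            .collect(Collectors.toList());
        int stillNeeded = replication - chosen.size();
        if (stillNeeded > 0) {
            // Mirrors "Failed to place enough replicas, still in need of 1 to reach 2".
            System.out.println("Failed to place enough replicas, still in need of "
                + stillNeeded + " to reach " + replication);
        }
        return chosen;
    }

    public static void main(String[] args) {
        // Only one datanode is actually reachable; the others refuse connections.
        List<String> live = List.of("127.0.0.1:38197");
        System.out.println("chosen = " + choose(live, Set.of(), 2));
    }
}
```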
2024-12-05T00:25:45,914 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8b4f65ad1ec496c8c72c57384036660f: 2024-12-05T00:25:45,914 INFO [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f., storeName=8b4f65ad1ec496c8c72c57384036660f/info, priority=13, startTime=1733358345473; duration=0sec 2024-12-05T00:25:45,914 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-05T00:25:45,914 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:25:45,915 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/d7bab672cc8f4616ab39fbc10f45d899 because midkey is the same as first or last row 2024-12-05T00:25:45,915 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-05T00:25:45,915 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:25:45,915 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/d7bab672cc8f4616ab39fbc10f45d899 because midkey is the same as first or last row 2024-12-05T00:25:45,915 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-05T00:25:45,915 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:25:45,915 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/d7bab672cc8f4616ab39fbc10f45d899 because midkey is the same as first or last row 2024-12-05T00:25:45,915 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-05T00:25:45,915 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8b4f65ad1ec496c8c72c57384036660f:info 2024-12-05T00:25:46,173 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:46,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37749 {}] regionserver.HRegion(8855): Flush requested on 8b4f65ad1ec496c8c72c57384036660f 2024-12-05T00:25:46,459 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8b4f65ad1ec496c8c72c57384036660f 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-05T00:25:46,464 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/.tmp/info/9dce3bd6c22c4ef7a5d699c43f5de131 is 1079, key is tmprow/info:/1733358346458/Put/seqid=0 2024-12-05T00:25:46,466 WARN [Thread-956 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:46,466 WARN [Thread-956 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]) is bad. 2024-12-05T00:25:46,466 WARN [Thread-956 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741871_1054 2024-12-05T00:25:46,466 WARN [Thread-956 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK] 2024-12-05T00:25:46,469 WARN [Thread-956 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33065 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:46,469 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:36582 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741872_1055] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data6]'}, localName='127.0.0.1:38197', datanodeUuid='675b84f7-6e45-4dbb-ac2e-cec9da575f5c', xmitsInProgress=0}:Exception transferring block BP-1914787219-172.17.0.2-1733358323877:blk_1073741872_1055 to mirror 127.0.0.1:33065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:25:46,469 WARN [Thread-956 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38197,DS-e0ccde86-205f-4834-b07f-b6930e81338d,DISK], DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK]) is bad. 2024-12-05T00:25:46,469 WARN [Thread-956 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741872_1055 2024-12-05T00:25:46,469 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:36582 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741872_1055] {}] datanode.BlockReceiver(316): Block 1073741872 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-05T00:25:46,469 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:36582 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741872_1055] {}] datanode.DataXceiver(331): 127.0.0.1:38197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36582 dst: /127.0.0.1:38197 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:25:46,469 WARN [Thread-956 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK] 2024-12-05T00:25:46,470 WARN [Thread-956 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:46,471 WARN [Thread-956 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK], DatanodeInfoWithStorage[127.0.0.1:38197,DS-e0ccde86-205f-4834-b07f-b6930e81338d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK]) is bad. 2024-12-05T00:25:46,471 WARN [Thread-956 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741873_1056 2024-12-05T00:25:46,471 WARN [Thread-956 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK] 2024-12-05T00:25:46,472 WARN [Thread-956 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:46,472 WARN [Thread-956 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK], DatanodeInfoWithStorage[127.0.0.1:38197,DS-e0ccde86-205f-4834-b07f-b6930e81338d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK]) is bad. 2024-12-05T00:25:46,472 WARN [Thread-956 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741874_1057 2024-12-05T00:25:46,473 WARN [Thread-956 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34943,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK] 2024-12-05T00:25:46,473 WARN [IPC Server handler 2 on default port 41383 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-05T00:25:46,473 WARN [IPC Server handler 2 on default port 41383 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-05T00:25:46,473 WARN [IPC Server handler 2 on default port 41383 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-05T00:25:46,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38197 is added to blk_1073741875_1058 (size=6027) 2024-12-05T00:25:46,492 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@637afb1f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38197, datanodeUuid=675b84f7-6e45-4dbb-ac2e-cec9da575f5c, infoPort=38813, infoSecurePort=0, ipcPort=39191, storageInfo=lv=-57;cid=testClusterID;nsid=355484051;c=1733358323877):Failed to transfer BP-1914787219-172.17.0.2-1733358323877:blk_1073741845_1028 to 127.0.0.1:44281 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:25:46,492 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6dbd1900[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38197, datanodeUuid=675b84f7-6e45-4dbb-ac2e-cec9da575f5c, infoPort=38813, infoSecurePort=0, ipcPort=39191, storageInfo=lv=-57;cid=testClusterID;nsid=355484051;c=1733358323877):Failed to transfer BP-1914787219-172.17.0.2-1733358323877:blk_1073741855_1038 to 127.0.0.1:44281 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
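The DataNode$DataTransfer warnings above are the datanode-side counterpart: the surviving node (127.0.0.1:38197) tries to push under-replicated blocks to a target the NameNode picked (127.0.0.1:44281), the TCP connect is refused, and the attempt is logged and dropped until re-replication is rescheduled. A compact sketch of that fire-and-log behaviour follows; the socket timeout and helper shape are assumptions for illustration.

```java
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;

/** Compact sketch of a datanode pushing one replica to a peer; not DataNode$DataTransfer. */
public class ReplicaTransferSketch {

    static void transferBlock(String blockId, String targetHostPort) {
        String[] hp = targetHostPort.split(":");
        try (Socket s = new Socket()) {
            // Connect with a timeout; a dead peer fails fast with "Connection refused".
            s.connect(new InetSocketAddress(hp[0], Integer.parseInt(hp[1])), 5_000);
            // ... stream the block's bytes here ...
            System.out.println("transferred " + blockId + " to " + targetHostPort);
        } catch (IOException e) {
            // Mirror the log: warn and give up; the NameNode will reschedule the replication.
            System.out.println("Failed to transfer " + blockId + " to " + targetHostPort + " got " + e);
        }
    }

    public static void main(String[] args) {
        transferBlock("blk_1073741850_1033", "127.0.0.1:44281");
    }
}
```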
2024-12-05T00:25:46,877 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/.tmp/info/9dce3bd6c22c4ef7a5d699c43f5de131 2024-12-05T00:25:46,883 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/.tmp/info/9dce3bd6c22c4ef7a5d699c43f5de131 as hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/9dce3bd6c22c4ef7a5d699c43f5de131 2024-12-05T00:25:46,888 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/9dce3bd6c22c4ef7a5d699c43f5de131, entries=1, sequenceid=45, filesize=5.9 K 2024-12-05T00:25:46,889 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 8b4f65ad1ec496c8c72c57384036660f in 430ms, sequenceid=45, compaction requested=false 2024-12-05T00:25:46,889 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8b4f65ad1ec496c8c72c57384036660f: 2024-12-05T00:25:46,890 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-12-05T00:25:46,890 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:25:46,890 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/d7bab672cc8f4616ab39fbc10f45d899 because midkey is the same as first or last row 2024-12-05T00:25:47,017 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:47,017 WARN [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 
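The final WARN above shows the log roller protecting itself: when every roll immediately fails again because too few live datanodes remain to satisfy the WAL's replica requirement, it stops tight-looping and just records "Too many consecutive RollWriter requests". A tiny sketch of such a guard is below; the threshold value is an arbitrary example, not a documented HBase default.

```java
/** Tiny sketch of a consecutive-roll-request guard; the threshold is an arbitrary example. */
public class RollRequestGuardSketch {

    private static final int MAX_CONSECUTIVE_ROLLS = 5;   // hypothetical threshold
    private int consecutiveRollRequests = 0;

    /** Called on each roll request; returns true when the roll should actually be attempted. */
    boolean onRollRequested(boolean lastRollFailed) {
        consecutiveRollRequests = lastRollFailed ? consecutiveRollRequests + 1 : 0;
        if (consecutiveRollRequests > MAX_CONSECUTIVE_ROLLS) {
            System.out.println("Too many consecutive RollWriter requests; "
                + "live datanodes are below the tolerable replica count, backing off.");
            return false;     // back off instead of spinning on a hopeless roll
        }
        return true;
    }

    public static void main(String[] args) {
        RollRequestGuardSketch guard = new RollRequestGuardSketch();
        for (int i = 0; i < 8; i++) {
            System.out.println("roll " + i + " proceeds: " + guard.onRollRequested(true));
        }
    }
}
```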
2024-12-05T00:25:47,053 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:47,075 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:25:47,080 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:25:47,081 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:25:47,081 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:25:47,081 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T00:25:47,081 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16a2580d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:25:47,081 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@23df32cf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:25:47,196 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@c1d8e25{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/java.io.tmpdir/jetty-localhost-40047-hadoop-hdfs-3_4_1-tests_jar-_-any-18108092690568935513/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:25:47,196 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5af75f98{HTTP/1.1, (http/1.1)}{localhost:40047} 2024-12-05T00:25:47,197 INFO [Time-limited test {}] server.Server(415): Started @127824ms 2024-12-05T00:25:47,198 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T00:25:47,295 WARN [Thread-975 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-05T00:25:47,303 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe704a8acab55278d with lease ID 0x6f798abb99ec6dd7: from storage DS-32e7a19b-9b43-45a3-b613-bb1258e4827b node DatanodeRegistration(127.0.0.1:33219, datanodeUuid=62e5d8db-0dd5-4ee1-910d-f64b93970567, infoPort=33939, infoSecurePort=0, ipcPort=38453, storageInfo=lv=-57;cid=testClusterID;nsid=355484051;c=1733358323877), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:25:47,303 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe704a8acab55278d with lease ID 0x6f798abb99ec6dd7: from storage DS-848595dd-f9d6-4acc-afbc-5291a9af1401 node DatanodeRegistration(127.0.0.1:33219, datanodeUuid=62e5d8db-0dd5-4ee1-910d-f64b93970567, infoPort=33939, infoSecurePort=0, ipcPort=38453, storageInfo=lv=-57;cid=testClusterID;nsid=355484051;c=1733358323877), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:25:48,173 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:48,493 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6dbd1900[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38197, datanodeUuid=675b84f7-6e45-4dbb-ac2e-cec9da575f5c, infoPort=38813, infoSecurePort=0, ipcPort=39191, storageInfo=lv=-57;cid=testClusterID;nsid=355484051;c=1733358323877):Failed to transfer BP-1914787219-172.17.0.2-1733358323877:blk_1073741870_1053 to 127.0.0.1:44281 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
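Once the replacement datanode (127.0.0.1:33219) registers, the NameNode processes one full block report per storage and logs the block count and processing time, as in the two processReport entries above. A minimal sketch of that accounting follows; the report structure is invented for illustration and is far simpler than the real block manager's.

```java
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

/** Minimal sketch of full block-report accounting; the report shape is invented. */
public class BlockReportSketch {

    /** blockId -> datanode storages known to hold it. */
    private final Map<Long, Set<String>> blockMap = new HashMap<>();

    void processReport(String storageId, List<Long> reportedBlocks) {
        long start = System.nanoTime();
        for (long blockId : reportedBlocks) {
            blockMap.computeIfAbsent(blockId, k -> new HashSet<>()).add(storageId);
        }
        long millis = (System.nanoTime() - start) / 1_000_000;
        // Mirrors "from storage DS-... blocks: N, ..., processing time: M msecs".
        System.out.println("processReport from storage " + storageId
            + ", blocks: " + reportedBlocks.size() + ", processing time: " + millis + " msecs");
    }

    public static void main(String[] args) {
        BlockReportSketch nn = new BlockReportSketch();
        nn.processReport("DS-32e7a19b", List.of(1073741865L, 1073741870L, 1073741875L));
    }
}
```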
2024-12-05T00:25:48,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741865_1048 (size=6027) 2024-12-05T00:25:49,017 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:49,053 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:49,493 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@637afb1f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38197, datanodeUuid=675b84f7-6e45-4dbb-ac2e-cec9da575f5c, infoPort=38813, infoSecurePort=0, ipcPort=39191, storageInfo=lv=-57;cid=testClusterID;nsid=355484051;c=1733358323877):Failed to transfer BP-1914787219-172.17.0.2-1733358323877:blk_1073741875_1058 to 127.0.0.1:44281 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:25:50,174 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:51,018 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:51,053 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:52,174 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:53,018 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:53,054 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:54,175 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:54,768 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-05T00:25:55,018 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:55,054 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:55,229 ERROR [FSHLog-0-hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData-prefix:2113c16e5528,43963,1733358324824 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:55,229 WARN [FSHLog-0-hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData-prefix:2113c16e5528,43963,1733358324824 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:55,229 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 2113c16e5528%2C43963%2C1733358324824:(num 1733358325078) roll requested 2024-12-05T00:25:55,229 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C43963%2C1733358324824.1733358355229 2024-12-05T00:25:55,232 WARN [Thread-996 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-05T00:25:55,232 WARN [Thread-996 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK], DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK]) is bad. 2024-12-05T00:25:55,232 WARN [Thread-996 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741876_1059 2024-12-05T00:25:55,233 WARN [Thread-996 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK] 2024-12-05T00:25:55,237 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:55,237 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:55,237 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:55,237 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:55,237 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:25:55,237 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/WALs/2113c16e5528,43963,1733358324824/2113c16e5528%2C43963%2C1733358324824.1733358325078 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/WALs/2113c16e5528,43963,1733358324824/2113c16e5528%2C43963%2C1733358324824.1733358355229 2024-12-05T00:25:55,238 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:55,238 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
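The roll above follows the usual ordering: create the new WAL file, swap writers so appends go to it, interrupt the sync runners blocked on the dead pipeline, then close the old writer while tolerating failure (the old file is handed to lease recovery in the next entries). A condensed sketch of that ordering is shown below; the Writer interface is deliberately abstract and not HBase's actual WAL provider API.

```java
import java.io.IOException;

/** Condensed sketch of a WAL roll; the Writer interface is deliberately abstract. */
public class WalRollSketch {

    interface Writer {
        void append(byte[] entry) throws IOException;
        void close() throws IOException;
        String path();
    }

    private Writer current;

    /** Swap in the new writer first, then close the old one best-effort. */
    Writer roll(Writer newWriter) {
        Writer old = current;
        current = newWriter;                 // appends go to the new file from here on
        if (old != null) {
            try {
                old.close();                 // may fail if the old pipeline's datanodes are gone
            } catch (IOException e) {
                // Mirror the log: "close old writer failed" is non-fatal; lease recovery on
                // the old file is started separately so its entries can still be replayed.
                System.out.println("close old writer failed: " + e.getMessage());
            }
            System.out.println("Rolled WAL " + old.path() + "; new WAL " + newWriter.path());
        }
        return old;
    }
}
```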
2024-12-05T00:25:55,238 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/WALs/2113c16e5528,43963,1733358324824/2113c16e5528%2C43963%2C1733358324824.1733358325078 2024-12-05T00:25:55,238 WARN [IPC Server handler 2 on default port 41383 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/WALs/2113c16e5528,43963,1733358324824/2113c16e5528%2C43963%2C1733358324824.1733358325078 has not been closed. Lease recovery is in progress. RecoveryId = 1061 for block blk_1073741830_1006 2024-12-05T00:25:55,239 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/WALs/2113c16e5528,43963,1733358324824/2113c16e5528%2C43963%2C1733358324824.1733358325078 after 0ms 2024-12-05T00:25:55,239 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33939:33939),(127.0.0.1/127.0.0.1:38813:38813)] 2024-12-05T00:25:55,239 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/WALs/2113c16e5528,43963,1733358324824/2113c16e5528%2C43963%2C1733358324824.1733358325078 is not closed yet, will try archiving it next time 2024-12-05T00:25:56,175 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:57,019 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-05T00:25:57,318 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3c5c3f20 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1914787219-172.17.0.2-1733358323877:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:35205,null,null]) java.net.ConnectException: Call From 2113c16e5528/172.17.0.2 to localhost:41801 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-05T00:25:57,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741833_1019 (size=455) 2024-12-05T00:25:57,983 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358325568 to hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/oldWALs/2113c16e5528%2C37749%2C1733358324931.1733358325568 2024-12-05T00:25:57,984 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358342978 to hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/oldWALs/2113c16e5528%2C37749%2C1733358324931.1733358342978 2024-12-05T00:25:58,175 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:59,019 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:25:59,240 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/WALs/2113c16e5528,43963,1733358324824/2113c16e5528%2C43963%2C1733358324824.1733358325078 after 4002ms 2024-12-05T00:26:00,176 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:00,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38197 is added to blk_1073741833_1019 (size=455) 2024-12-05T00:26:01,019 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:02,176 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:02,928 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C37749%2C1733358324931.1733358362928 2024-12-05T00:26:02,932 WARN [Thread-1007 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1062 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33065 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-05T00:26:02,932 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-377644347_22 at /127.0.0.1:54382 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741878_1062] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data6]'}, localName='127.0.0.1:38197', datanodeUuid='675b84f7-6e45-4dbb-ac2e-cec9da575f5c', xmitsInProgress=0}:Exception transferring block BP-1914787219-172.17.0.2-1733358323877:blk_1073741878_1062 to mirror 127.0.0.1:33065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:26:02,932 WARN [Thread-1007 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741878_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38197,DS-e0ccde86-205f-4834-b07f-b6930e81338d,DISK], DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK]) is bad. 2024-12-05T00:26:02,932 WARN [Thread-1007 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741878_1062 2024-12-05T00:26:02,932 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-377644347_22 at /127.0.0.1:54382 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741878_1062] {}] datanode.BlockReceiver(316): Block 1073741878 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-05T00:26:02,932 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-377644347_22 at /127.0.0.1:54382 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741878_1062] {}] datanode.DataXceiver(331): 127.0.0.1:38197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54382 dst: /127.0.0.1:38197 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:26:02,933 WARN [Thread-1007 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK] 2024-12-05T00:26:02,935 WARN [Thread-1007 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1063 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44281 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:02,935 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-377644347_22 at /127.0.0.1:54130 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741879_1063] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data4]'}, localName='127.0.0.1:33219', datanodeUuid='62e5d8db-0dd5-4ee1-910d-f64b93970567', xmitsInProgress=0}:Exception transferring block BP-1914787219-172.17.0.2-1733358323877:blk_1073741879_1063 to mirror 127.0.0.1:44281 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-05T00:26:02,935 WARN [Thread-1007 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741879_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33219,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK], DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK]) is bad. 2024-12-05T00:26:02,935 WARN [Thread-1007 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741879_1063 2024-12-05T00:26:02,935 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-377644347_22 at /127.0.0.1:54130 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741879_1063] {}] datanode.BlockReceiver(316): Block 1073741879 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-05T00:26:02,935 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-377644347_22 at /127.0.0.1:54130 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741879_1063] {}] datanode.DataXceiver(331): 127.0.0.1:33219:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54130 dst: /127.0.0.1:33219 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-05T00:26:02,936 WARN [Thread-1007 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK] 2024-12-05T00:26:02,939 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:02,939 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:02,939 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:02,940 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:02,940 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:02,940 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358345001 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358362928 2024-12-05T00:26:02,941 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38813:38813),(127.0.0.1/127.0.0.1:33939:33939)] 2024-12-05T00:26:02,941 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358345001 is not closed yet, will try archiving it next time 2024-12-05T00:26:02,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38197 is added to blk_1073741860_1043 (size=13591) 2024-12-05T00:26:02,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37749 {}] regionserver.HRegion(8855): Flush requested on 8b4f65ad1ec496c8c72c57384036660f 2024-12-05T00:26:02,951 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8b4f65ad1ec496c8c72c57384036660f 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-05T00:26:02,956 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/.tmp/info/abe2c8786f0c43c1958a6fae013d2e69 is 1080, key is row0013/info:/1733358362942/Put/seqid=0 2024-12-05T00:26:02,959 WARN [Thread-1015 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1065 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44281 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-05T00:26:02,959 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:54394 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741881_1065] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data6]'}, localName='127.0.0.1:38197', datanodeUuid='675b84f7-6e45-4dbb-ac2e-cec9da575f5c', xmitsInProgress=0}:Exception transferring block BP-1914787219-172.17.0.2-1733358323877:blk_1073741881_1065 to mirror 127.0.0.1:44281 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:26:02,959 WARN [Thread-1015 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741881_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38197,DS-e0ccde86-205f-4834-b07f-b6930e81338d,DISK], DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK]) is bad. 2024-12-05T00:26:02,959 WARN [Thread-1015 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741881_1065 2024-12-05T00:26:02,959 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:54394 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741881_1065] {}] datanode.BlockReceiver(316): Block 1073741881 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-05T00:26:02,959 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:54394 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741881_1065] {}] datanode.DataXceiver(331): 127.0.0.1:38197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54394 dst: /127.0.0.1:38197 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:26:02,959 WARN [Thread-1015 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK] 2024-12-05T00:26:02,961 WARN [Thread-1015 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:02,961 WARN [Thread-1015 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741882_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK], DatanodeInfoWithStorage[127.0.0.1:38197,DS-e0ccde86-205f-4834-b07f-b6930e81338d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK]) is bad. 
2024-12-05T00:26:02,961 WARN [Thread-1015 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741882_1066 2024-12-05T00:26:02,962 WARN [Thread-1015 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK] 2024-12-05T00:26:02,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38197 is added to blk_1073741883_1067 (size=11421) 2024-12-05T00:26:02,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741883_1067 (size=11421) 2024-12-05T00:26:02,970 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/.tmp/info/abe2c8786f0c43c1958a6fae013d2e69 2024-12-05T00:26:02,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/.tmp/info/abe2c8786f0c43c1958a6fae013d2e69 as hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/abe2c8786f0c43c1958a6fae013d2e69 2024-12-05T00:26:02,981 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/abe2c8786f0c43c1958a6fae013d2e69, entries=6, sequenceid=55, filesize=11.2 K 2024-12-05T00:26:02,982 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6455 for 8b4f65ad1ec496c8c72c57384036660f in 31ms, sequenceid=55, compaction requested=true 2024-12-05T00:26:02,982 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8b4f65ad1ec496c8c72c57384036660f: 2024-12-05T00:26:02,983 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K 2024-12-05T00:26:02,983 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:26:02,983 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/d7bab672cc8f4616ab39fbc10f45d899 because midkey is the same as first or last row 2024-12-05T00:26:02,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8b4f65ad1ec496c8c72c57384036660f:info, priority=-2147483648, current under compaction store size is 1 2024-12-05T00:26:02,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-05T00:26:02,983 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): 
Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-05T00:26:02,984 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35442 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-05T00:26:02,984 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.HStore(1541): 8b4f65ad1ec496c8c72c57384036660f/info is initiating minor compaction (all files) 2024-12-05T00:26:02,984 INFO [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8b4f65ad1ec496c8c72c57384036660f/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f. 2024-12-05T00:26:02,984 INFO [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/d7bab672cc8f4616ab39fbc10f45d899, hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/9dce3bd6c22c4ef7a5d699c43f5de131, hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/abe2c8786f0c43c1958a6fae013d2e69] into tmpdir=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/.tmp, totalSize=34.6 K 2024-12-05T00:26:02,985 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] compactions.Compactor(225): Compacting d7bab672cc8f4616ab39fbc10f45d899, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733358338969 2024-12-05T00:26:02,985 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9dce3bd6c22c4ef7a5d699c43f5de131, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1733358346458 2024-12-05T00:26:02,985 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] compactions.Compactor(225): Compacting abe2c8786f0c43c1958a6fae013d2e69, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733358346864 2024-12-05T00:26:02,999 INFO [RS:0;2113c16e5528:37749-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8b4f65ad1ec496c8c72c57384036660f#info#compaction#24 average throughput is 17.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-05T00:26:03,000 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/.tmp/info/823bede8ec8042839a23152b2011aa30 is 1080, key is row0002/info:/1733358338969/Put/seqid=0 2024-12-05T00:26:03,002 WARN [Thread-1025 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1068 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44281 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:03,002 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:54162 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741884_1068] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data4]'}, localName='127.0.0.1:33219', datanodeUuid='62e5d8db-0dd5-4ee1-910d-f64b93970567', xmitsInProgress=0}:Exception transferring block BP-1914787219-172.17.0.2-1733358323877:blk_1073741884_1068 to mirror 127.0.0.1:44281 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-05T00:26:03,003 WARN [Thread-1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741884_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33219,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK], DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK]) is bad. 2024-12-05T00:26:03,003 WARN [Thread-1025 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741884_1068 2024-12-05T00:26:03,003 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:54162 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741884_1068] {}] datanode.BlockReceiver(316): Block 1073741884 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-05T00:26:03,003 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:54162 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741884_1068] {}] datanode.DataXceiver(331): 127.0.0.1:33219:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54162 dst: /127.0.0.1:33219 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:26:03,003 WARN [Thread-1025 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK] 2024-12-05T00:26:03,004 WARN [Thread-1025 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1069 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-05T00:26:03,005 WARN [Thread-1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741885_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK], DatanodeInfoWithStorage[127.0.0.1:33219,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK]) is bad. 2024-12-05T00:26:03,005 WARN [Thread-1025 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741885_1069 2024-12-05T00:26:03,005 WARN [Thread-1025 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK] 2024-12-05T00:26:03,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741886_1070 (size=23502) 2024-12-05T00:26:03,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38197 is added to blk_1073741886_1070 (size=23502) 2024-12-05T00:26:03,016 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/.tmp/info/823bede8ec8042839a23152b2011aa30 as hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/823bede8ec8042839a23152b2011aa30 2024-12-05T00:26:03,020 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:03,020 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-12-05T00:26:03,023 INFO [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8b4f65ad1ec496c8c72c57384036660f/info of 8b4f65ad1ec496c8c72c57384036660f into 823bede8ec8042839a23152b2011aa30(size=23.0 K), total size for store is 23.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-05T00:26:03,023 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8b4f65ad1ec496c8c72c57384036660f: 2024-12-05T00:26:03,023 INFO [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f., storeName=8b4f65ad1ec496c8c72c57384036660f/info, priority=13, startTime=1733358362983; duration=0sec 2024-12-05T00:26:03,023 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-12-05T00:26:03,023 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:26:03,023 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/823bede8ec8042839a23152b2011aa30 because midkey is the same as first or last row 2024-12-05T00:26:03,023 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-12-05T00:26:03,023 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:26:03,024 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/823bede8ec8042839a23152b2011aa30 because midkey is the same as first or last row 2024-12-05T00:26:03,024 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-12-05T00:26:03,024 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:26:03,024 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/823bede8ec8042839a23152b2011aa30 because midkey is the same as first or last row 2024-12-05T00:26:03,024 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-05T00:26:03,024 DEBUG [RS:0;2113c16e5528:37749-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8b4f65ad1ec496c8c72c57384036660f:info 2024-12-05T00:26:03,163 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-05T00:26:03,163 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-05T00:26:03,163 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:26:03,164 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:26:03,164 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:26:03,164 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-05T00:26:03,164 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-05T00:26:03,164 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1001612558, stopped=false 2024-12-05T00:26:03,164 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=2113c16e5528,43963,1733358324824 2024-12-05T00:26:03,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T00:26:03,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1018002f6bd0001, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T00:26:03,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:03,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45031-0x1018002f6bd0002, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T00:26:03,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1018002f6bd0001, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:03,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45031-0x1018002f6bd0002, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:03,167 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T00:26:03,168 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-05T00:26:03,168 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:26:03,168 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:26:03,168 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:26:03,168 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45031-0x1018002f6bd0002, quorum=127.0.0.1:54477, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:26:03,168 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2113c16e5528,37749,1733358324931' ***** 2024-12-05T00:26:03,168 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T00:26:03,168 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37749-0x1018002f6bd0001, quorum=127.0.0.1:54477, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:26:03,168 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2113c16e5528,45031,1733358326108' ***** 2024-12-05T00:26:03,168 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T00:26:03,169 INFO [RS:0;2113c16e5528:37749 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T00:26:03,169 INFO [RS:1;2113c16e5528:45031 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T00:26:03,169 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T00:26:03,169 INFO [RS:1;2113c16e5528:45031 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T00:26:03,169 INFO [RS:0;2113c16e5528:37749 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T00:26:03,169 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T00:26:03,169 INFO [RS:0;2113c16e5528:37749 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T00:26:03,169 INFO [RS:1;2113c16e5528:45031 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T00:26:03,169 INFO [RS:1;2113c16e5528:45031 {}] regionserver.HRegionServer(959): stopping server 2113c16e5528,45031,1733358326108 2024-12-05T00:26:03,169 INFO [RS:1;2113c16e5528:45031 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T00:26:03,169 INFO [RS:0;2113c16e5528:37749 {}] regionserver.HRegionServer(3091): Received CLOSE for 8b4f65ad1ec496c8c72c57384036660f 2024-12-05T00:26:03,169 INFO [RS:1;2113c16e5528:45031 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;2113c16e5528:45031. 
2024-12-05T00:26:03,169 DEBUG [RS:1;2113c16e5528:45031 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:26:03,169 DEBUG [RS:1;2113c16e5528:45031 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:26:03,169 INFO [RS:1;2113c16e5528:45031 {}] regionserver.HRegionServer(976): stopping server 2113c16e5528,45031,1733358326108; all regions closed. 2024-12-05T00:26:03,169 INFO [RS:0;2113c16e5528:37749 {}] regionserver.HRegionServer(959): stopping server 2113c16e5528,37749,1733358324931 2024-12-05T00:26:03,170 INFO [RS:0;2113c16e5528:37749 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T00:26:03,170 INFO [RS:0;2113c16e5528:37749 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;2113c16e5528:37749. 
2024-12-05T00:26:03,170 DEBUG [RS:0;2113c16e5528:37749 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:26:03,170 DEBUG [RS:0;2113c16e5528:37749 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:26:03,170 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 8b4f65ad1ec496c8c72c57384036660f, disabling compactions & flushes 2024-12-05T00:26:03,170 INFO [RS:0;2113c16e5528:37749 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T00:26:03,170 INFO [RS:0;2113c16e5528:37749 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T00:26:03,170 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:03,170 INFO [RS:0;2113c16e5528:37749 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-05T00:26:03,170 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f. 2024-12-05T00:26:03,170 INFO [RS:0;2113c16e5528:37749 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-05T00:26:03,170 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:03,170 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f. 2024-12-05T00:26:03,170 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f. after waiting 0 ms 2024-12-05T00:26:03,170 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f. 
2024-12-05T00:26:03,170 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:03,170 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:03,170 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 8b4f65ad1ec496c8c72c57384036660f 1/1 column families, dataSize=6.30 KB heapSize=7 KB 2024-12-05T00:26:03,170 INFO [RS:0;2113c16e5528:37749 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-05T00:26:03,170 DEBUG [RS:0;2113c16e5528:37749 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 8b4f65ad1ec496c8c72c57384036660f=TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f.} 2024-12-05T00:26:03,170 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:03,170 DEBUG [RS:0;2113c16e5528:37749 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 8b4f65ad1ec496c8c72c57384036660f 2024-12-05T00:26:03,170 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T00:26:03,170 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T00:26:03,171 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T00:26:03,171 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T00:26:03,171 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T00:26:03,171 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-12-05T00:26:03,171 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:03,171 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:03,171 ERROR [FSHLog-0-hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8-prefix:2113c16e5528,37749,1733358324931.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:03,171 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 2024-12-05T00:26:03,171 WARN [FSHLog-0-hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8-prefix:2113c16e5528,37749,1733358324931.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:03,171 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 2113c16e5528%2C37749%2C1733358324931.meta:.meta(num 1733358325913) roll requested 2024-12-05T00:26:03,172 INFO [regionserver/2113c16e5528:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C37749%2C1733358324931.meta.1733358363171.meta 2024-12-05T00:26:03,172 WARN [IPC Server handler 4 on default port 41383 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 has not been closed. Lease recovery is in progress. 
RecoveryId = 1071 for block blk_1073741837_1013 2024-12-05T00:26:03,172 INFO [regionserver/2113c16e5528:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-05T00:26:03,172 INFO [regionserver/2113c16e5528:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-05T00:26:03,172 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 after 1ms 2024-12-05T00:26:03,178 WARN [Thread-1033 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1072 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:03,179 WARN [Thread-1033 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741887_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK], DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK]) is bad. 
2024-12-05T00:26:03,179 WARN [Thread-1033 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741887_1072 2024-12-05T00:26:03,179 WARN [Thread-1033 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK] 2024-12-05T00:26:03,180 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/.tmp/info/7456c724d83a44d4822cd8a0f47e3c5d is 1080, key is row0018/info:/1733358362952/Put/seqid=0 2024-12-05T00:26:03,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38197 is added to blk_1073741889_1074 (size=11421) 2024-12-05T00:26:03,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741889_1074 (size=11421) 2024-12-05T00:26:03,186 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/.tmp/info/7456c724d83a44d4822cd8a0f47e3c5d 2024-12-05T00:26:03,187 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:03,188 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:03,188 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:03,188 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:03,188 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:03,188 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358363171.meta 2024-12-05T00:26:03,189 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:03,189 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:03,189 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta 2024-12-05T00:26:03,189 WARN [IPC Server handler 3 on default port 41383 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta has not been closed. Lease recovery is in progress. RecoveryId = 1075 for block blk_1073741834_1010 2024-12-05T00:26:03,190 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta after 0ms 2024-12-05T00:26:03,192 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33939:33939),(127.0.0.1/127.0.0.1:38813:38813)] 2024-12-05T00:26:03,192 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta is not closed yet, will try archiving it next time 2024-12-05T00:26:03,193 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/.tmp/info/7456c724d83a44d4822cd8a0f47e3c5d as hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/7456c724d83a44d4822cd8a0f47e3c5d 2024-12-05T00:26:03,198 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/7456c724d83a44d4822cd8a0f47e3c5d, entries=6, sequenceid=65, filesize=11.2 K 2024-12-05T00:26:03,199 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 8b4f65ad1ec496c8c72c57384036660f in 29ms, sequenceid=65, compaction requested=false 2024-12-05T00:26:03,203 DEBUG 
[StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/1e93e6fff97b43198e72b93f061ce025, hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/329e30068fe2419b8a20b0a3c7086f73, hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/d7bab672cc8f4616ab39fbc10f45d899, hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/6618bd3565c54b7a8b97a2d259c6df69, hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/9dce3bd6c22c4ef7a5d699c43f5de131, hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/abe2c8786f0c43c1958a6fae013d2e69] to archive 2024-12-05T00:26:03,204 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-05T00:26:03,205 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/1e93e6fff97b43198e72b93f061ce025 to hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/1e93e6fff97b43198e72b93f061ce025 2024-12-05T00:26:03,206 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/329e30068fe2419b8a20b0a3c7086f73 to hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/329e30068fe2419b8a20b0a3c7086f73 2024-12-05T00:26:03,208 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/d7bab672cc8f4616ab39fbc10f45d899 to hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/d7bab672cc8f4616ab39fbc10f45d899 2024-12-05T00:26:03,209 DEBUG 
[StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/6618bd3565c54b7a8b97a2d259c6df69 to hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/6618bd3565c54b7a8b97a2d259c6df69 2024-12-05T00:26:03,210 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/9dce3bd6c22c4ef7a5d699c43f5de131 to hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/9dce3bd6c22c4ef7a5d699c43f5de131 2024-12-05T00:26:03,211 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/abe2c8786f0c43c1958a6fae013d2e69 to hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/info/abe2c8786f0c43c1958a6fae013d2e69 2024-12-05T00:26:03,212 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=2113c16e5528:43963 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-12-05T00:26:03,212 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [1e93e6fff97b43198e72b93f061ce025=10347, 329e30068fe2419b8a20b0a3c7086f73=12506, d7bab672cc8f4616ab39fbc10f45d899=17994, 6618bd3565c54b7a8b97a2d259c6df69=6027, 9dce3bd6c22c4ef7a5d699c43f5de131=6027, abe2c8786f0c43c1958a6fae013d2e69=11421] 2024-12-05T00:26:03,213 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/hbase/meta/1588230740/.tmp/info/be5a281f365a456aabaf048b0b9837db is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f./info:regioninfo/1733358326577/Put/seqid=0 2024-12-05T00:26:03,216 WARN [Thread-1045 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1076 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33065 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:03,216 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:54214 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741890_1076] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data4]'}, localName='127.0.0.1:33219', datanodeUuid='62e5d8db-0dd5-4ee1-910d-f64b93970567', xmitsInProgress=0}:Exception transferring block BP-1914787219-172.17.0.2-1733358323877:blk_1073741890_1076 to mirror 127.0.0.1:33065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:26:03,216 WARN [Thread-1045 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741890_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33219,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK], DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK]) is bad. 2024-12-05T00:26:03,216 WARN [Thread-1045 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741890_1076 2024-12-05T00:26:03,216 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:54214 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741890_1076] {}] datanode.BlockReceiver(316): Block 1073741890 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-05T00:26:03,216 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:54214 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741890_1076] {}] datanode.DataXceiver(331): 127.0.0.1:33219:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54214 dst: /127.0.0.1:33219 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:26:03,217 WARN [Thread-1045 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK] 2024-12-05T00:26:03,217 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8b4f65ad1ec496c8c72c57384036660f/recovered.edits/68.seqid, newMaxSeqId=68, maxSeqId=1 2024-12-05T00:26:03,217 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f. 
2024-12-05T00:26:03,217 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 8b4f65ad1ec496c8c72c57384036660f: Waiting for close lock at 1733358363170Running coprocessor pre-close hooks at 1733358363170Disabling compacts and flushes for region at 1733358363170Disabling writes for close at 1733358363170Obtaining lock to block concurrent updates at 1733358363170Preparing flush snapshotting stores in 8b4f65ad1ec496c8c72c57384036660f at 1733358363170Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f., syncing WAL and waiting on mvcc, flushsize=dataSize=6455, getHeapSize=7152, getOffHeapSize=0, getCellsCount=6 at 1733358363171 (+1 ms)Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f. at 1733358363171Flushing 8b4f65ad1ec496c8c72c57384036660f/info: creating writer at 1733358363171Flushing 8b4f65ad1ec496c8c72c57384036660f/info: appending metadata at 1733358363179 (+8 ms)Flushing 8b4f65ad1ec496c8c72c57384036660f/info: closing flushed file at 1733358363179Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@10a5c2ba: reopening flushed file at 1733358363192 (+13 ms)Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 8b4f65ad1ec496c8c72c57384036660f in 29ms, sequenceid=65, compaction requested=false at 1733358363199 (+7 ms)Writing region close event to WAL at 1733358363213 (+14 ms)Running coprocessor post-close hooks at 1733358363217 (+4 ms)Closed at 1733358363217 2024-12-05T00:26:03,218 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733358326207.8b4f65ad1ec496c8c72c57384036660f. 2024-12-05T00:26:03,218 WARN [Thread-1045 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1077 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:03,218 WARN [Thread-1045 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741891_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK], DatanodeInfoWithStorage[127.0.0.1:33219,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK]) is bad. 
2024-12-05T00:26:03,218 WARN [Thread-1045 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741891_1077 2024-12-05T00:26:03,218 WARN [Thread-1045 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35205,DS-7b93ce7a-bb65-4ad6-a1d4-75b5c5ef17f8,DISK] 2024-12-05T00:26:03,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38197 is added to blk_1073741892_1078 (size=7089) 2024-12-05T00:26:03,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741892_1078 (size=7089) 2024-12-05T00:26:03,227 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/hbase/meta/1588230740/.tmp/info/be5a281f365a456aabaf048b0b9837db 2024-12-05T00:26:03,247 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/hbase/meta/1588230740/.tmp/ns/f20969f189f247d4b7a8a8a3bee4b6dd is 43, key is default/ns:d/1733358325970/Put/seqid=0 2024-12-05T00:26:03,249 WARN [Thread-1053 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1079 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33065 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:03,249 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:54468 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741893_1079] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data6]'}, localName='127.0.0.1:38197', datanodeUuid='675b84f7-6e45-4dbb-ac2e-cec9da575f5c', xmitsInProgress=0}:Exception transferring block BP-1914787219-172.17.0.2-1733358323877:blk_1073741893_1079 to mirror 127.0.0.1:33065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:26:03,250 WARN [Thread-1053 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741893_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38197,DS-e0ccde86-205f-4834-b07f-b6930e81338d,DISK], DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK]) is bad. 2024-12-05T00:26:03,250 WARN [Thread-1053 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741893_1079 2024-12-05T00:26:03,250 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:54468 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741893_1079] {}] datanode.BlockReceiver(316): Block 1073741893 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-05T00:26:03,250 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-570305084_22 at /127.0.0.1:54468 [Receiving block BP-1914787219-172.17.0.2-1733358323877:blk_1073741893_1079] {}] datanode.DataXceiver(331): 127.0.0.1:38197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54468 dst: /127.0.0.1:38197 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:26:03,251 WARN [Thread-1053 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33065,DS-cf54d914-b4bf-4dee-84bc-66450ddb912c,DISK] 2024-12-05T00:26:03,252 WARN [Thread-1053 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1080 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:03,252 WARN [Thread-1053 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1914787219-172.17.0.2-1733358323877:blk_1073741894_1080 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK], DatanodeInfoWithStorage[127.0.0.1:33219,DS-32e7a19b-9b43-45a3-b613-bb1258e4827b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK]) is bad. 2024-12-05T00:26:03,252 WARN [Thread-1053 {}] hdfs.DataStreamer(1850): Abandoning BP-1914787219-172.17.0.2-1733358323877:blk_1073741894_1080 2024-12-05T00:26:03,252 WARN [Thread-1053 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44281,DS-0c68e330-c341-401e-ab8a-5ef4bf86e24e,DISK] 2024-12-05T00:26:03,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741895_1081 (size=5153) 2024-12-05T00:26:03,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38197 is added to blk_1073741895_1081 (size=5153) 2024-12-05T00:26:03,257 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/hbase/meta/1588230740/.tmp/ns/f20969f189f247d4b7a8a8a3bee4b6dd 2024-12-05T00:26:03,277 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/hbase/meta/1588230740/.tmp/table/29875df5ad4f4d11b3158766b1610f00 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1733358326590/Put/seqid=0 2024-12-05T00:26:03,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38197 is added to blk_1073741896_1082 (size=5424) 2024-12-05T00:26:03,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741896_1082 (size=5424) 2024-12-05T00:26:03,283 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/hbase/meta/1588230740/.tmp/table/29875df5ad4f4d11b3158766b1610f00 2024-12-05T00:26:03,289 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/hbase/meta/1588230740/.tmp/info/be5a281f365a456aabaf048b0b9837db as hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/hbase/meta/1588230740/info/be5a281f365a456aabaf048b0b9837db 2024-12-05T00:26:03,294 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/hbase/meta/1588230740/info/be5a281f365a456aabaf048b0b9837db, entries=10, sequenceid=11, filesize=6.9 K 2024-12-05T00:26:03,295 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/hbase/meta/1588230740/.tmp/ns/f20969f189f247d4b7a8a8a3bee4b6dd as hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/hbase/meta/1588230740/ns/f20969f189f247d4b7a8a8a3bee4b6dd 2024-12-05T00:26:03,301 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/hbase/meta/1588230740/ns/f20969f189f247d4b7a8a8a3bee4b6dd, entries=2, sequenceid=11, filesize=5.0 K 2024-12-05T00:26:03,301 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/hbase/meta/1588230740/.tmp/table/29875df5ad4f4d11b3158766b1610f00 as hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/hbase/meta/1588230740/table/29875df5ad4f4d11b3158766b1610f00 2024-12-05T00:26:03,306 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/hbase/meta/1588230740/table/29875df5ad4f4d11b3158766b1610f00, entries=2, sequenceid=11, filesize=5.3 K 2024-12-05T00:26:03,308 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 137ms, sequenceid=11, compaction requested=false 2024-12-05T00:26:03,313 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-05T00:26:03,313 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T00:26:03,314 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T00:26:03,314 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733358363170Running coprocessor pre-close hooks at 1733358363170Disabling compacts and flushes for region at 1733358363170Disabling writes for 
close at 1733358363171 (+1 ms)Obtaining lock to block concurrent updates at 1733358363171Preparing flush snapshotting stores in 1588230740 at 1733358363171Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1733358363171Flushing stores of hbase:meta,,1.1588230740 at 1733358363193 (+22 ms)Flushing 1588230740/info: creating writer at 1733358363193Flushing 1588230740/info: appending metadata at 1733358363213 (+20 ms)Flushing 1588230740/info: closing flushed file at 1733358363213Flushing 1588230740/ns: creating writer at 1733358363232 (+19 ms)Flushing 1588230740/ns: appending metadata at 1733358363246 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733358363247 (+1 ms)Flushing 1588230740/table: creating writer at 1733358363263 (+16 ms)Flushing 1588230740/table: appending metadata at 1733358363277 (+14 ms)Flushing 1588230740/table: closing flushed file at 1733358363277Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a6ccf39: reopening flushed file at 1733358363288 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@47014765: reopening flushed file at 1733358363294 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@637f6c83: reopening flushed file at 1733358363301 (+7 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 137ms, sequenceid=11, compaction requested=false at 1733358363308 (+7 ms)Writing region close event to WAL at 1733358363309 (+1 ms)Running coprocessor post-close hooks at 1733358363313 (+4 ms)Closed at 1733358363313 2024-12-05T00:26:03,314 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-05T00:26:03,342 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.1733358345001 to hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/oldWALs/2113c16e5528%2C37749%2C1733358324931.1733358345001 2024-12-05T00:26:03,371 INFO [RS:0;2113c16e5528:37749 {}] regionserver.HRegionServer(976): stopping server 2113c16e5528,37749,1733358324931; all regions closed. 
2024-12-05T00:26:03,371 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:03,371 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:03,371 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:03,371 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:03,372 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:03,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38197 is added to blk_1073741888_1073 (size=825) 2024-12-05T00:26:03,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741888_1073 (size=825) 2024-12-05T00:26:03,392 INFO [regionserver/2113c16e5528:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T00:26:03,393 INFO [regionserver/2113c16e5528:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-05T00:26:03,393 INFO [regionserver/2113c16e5528:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-05T00:26:03,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741860_1043 (size=13591) 2024-12-05T00:26:04,174 INFO [regionserver/2113c16e5528:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T00:26:04,743 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-05T00:26:04,744 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T00:26:04,744 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-05T00:26:05,990 INFO [master/2113c16e5528:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-05T00:26:05,990 INFO [master/2113c16e5528:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-12-05T00:26:06,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38197 is added to blk_1073741835_1011 (size=393) 2024-12-05T00:26:06,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38197 is added to blk_1073741831_1007 (size=1321) 2024-12-05T00:26:07,173 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 after 4002ms 2024-12-05T00:26:07,190 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta after 4001ms 2024-12-05T00:26:07,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38197 is added to blk_1073741827_1003 (size=196) 2024-12-05T00:26:07,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38197 is added to blk_1073741829_1005 (size=34) 2024-12-05T00:26:07,321 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@79979b89 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1914787219-172.17.0.2-1733358323877:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:35205,null,null]) java.net.ConnectException: Call From 2113c16e5528/172.17.0.2 to localhost:41801 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-05T00:26:08,171 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-05T00:26:08,173 DEBUG [RS:1;2113c16e5528:45031 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/oldWALs 2024-12-05T00:26:08,173 INFO [RS:1;2113c16e5528:45031 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2113c16e5528%2C45031%2C1733358326108:(num 1733358326307) 2024-12-05T00:26:08,173 DEBUG [RS:1;2113c16e5528:45031 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:26:08,173 INFO [RS:1;2113c16e5528:45031 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T00:26:08,173 INFO [RS:1;2113c16e5528:45031 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T00:26:08,174 INFO [RS:1;2113c16e5528:45031 {}] hbase.ChoreService(370): Chore service for: regionserver/2113c16e5528:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T00:26:08,174 INFO [RS:1;2113c16e5528:45031 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T00:26:08,174 INFO [RS:1;2113c16e5528:45031 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T00:26:08,174 INFO [RS:1;2113c16e5528:45031 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
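[Editor's note, not part of the test output] The ERROR from WAL-Shutdown-0 above reports that the WAL writer did not close within the default wait and quotes the configuration key "hbase.wal.fshlog.wait.on.shutdown.seconds". As a purely hypothetical illustration of where that knob lives (the property name is simply the one quoted in the log; the value 30 is arbitrary), a test or deployment could raise the wait before starting the cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalShutdownWaitExample {
  public static void main(String[] args) {
    // Build an HBase configuration and raise the WAL shutdown wait from the
    // 5 seconds reported in the ERROR above to 30 seconds (arbitrary value).
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);
    System.out.println(conf.get("hbase.wal.fshlog.wait.on.shutdown.seconds"));
  }
}

In this run the surrounding records suggest the wait expired because the datanodes hosting the WAL blocks had been stopped (this is the testLogRollOnDatanodeDeath scenario), so a longer wait alone would not necessarily have let the close succeed; the snippet only shows how the quoted key is set.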
2024-12-05T00:26:08,174 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-05T00:26:08,174 INFO [RS:1;2113c16e5528:45031 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T00:26:08,174 INFO [RS:1;2113c16e5528:45031 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45031 2024-12-05T00:26:08,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45031-0x1018002f6bd0002, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2113c16e5528,45031,1733358326108 2024-12-05T00:26:08,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T00:26:08,176 INFO [RS:1;2113c16e5528:45031 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T00:26:08,177 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2113c16e5528,45031,1733358326108] 2024-12-05T00:26:08,179 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2113c16e5528,45031,1733358326108 already deleted, retry=false 2024-12-05T00:26:08,179 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2113c16e5528,45031,1733358326108 expired; onlineServers=1 2024-12-05T00:26:08,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:08,218 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:08,231 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:08,231 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:08,232 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:08,232 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:08,232 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:08,238 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:08,239 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:08,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45031-0x1018002f6bd0002, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:26:08,278 INFO [RS:1;2113c16e5528:45031 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T00:26:08,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45031-0x1018002f6bd0002, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:26:08,278 INFO [RS:1;2113c16e5528:45031 {}] regionserver.HRegionServer(1031): Exiting; stopping=2113c16e5528,45031,1733358326108; zookeeper connection closed. 2024-12-05T00:26:08,278 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7d37e119 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7d37e119 2024-12-05T00:26:08,372 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-05T00:26:08,375 DEBUG [RS:0;2113c16e5528:37749 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/oldWALs 2024-12-05T00:26:08,375 INFO [RS:0;2113c16e5528:37749 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2113c16e5528%2C37749%2C1733358324931.meta:.meta(num 1733358363171) 2024-12-05T00:26:08,376 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:08,376 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:08,376 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:08,376 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:08,376 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:08,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741880_1064 (size=15140) 2024-12-05T00:26:08,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38197 is added to blk_1073741880_1064 (size=15140) 2024-12-05T00:26:08,380 DEBUG [RS:0;2113c16e5528:37749 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/oldWALs 2024-12-05T00:26:08,380 INFO [RS:0;2113c16e5528:37749 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2113c16e5528%2C37749%2C1733358324931:(num 1733358362928) 2024-12-05T00:26:08,380 DEBUG [RS:0;2113c16e5528:37749 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:26:08,380 INFO [RS:0;2113c16e5528:37749 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T00:26:08,380 INFO [RS:0;2113c16e5528:37749 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T00:26:08,380 INFO [RS:0;2113c16e5528:37749 {}] hbase.ChoreService(370): Chore service for: regionserver/2113c16e5528:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T00:26:08,381 INFO [RS:0;2113c16e5528:37749 {}] 
hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T00:26:08,381 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-05T00:26:08,381 INFO [RS:0;2113c16e5528:37749 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37749 2024-12-05T00:26:08,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1018002f6bd0001, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2113c16e5528,37749,1733358324931 2024-12-05T00:26:08,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T00:26:08,389 INFO [RS:0;2113c16e5528:37749 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T00:26:08,390 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2113c16e5528,37749,1733358324931] 2024-12-05T00:26:08,391 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2113c16e5528,37749,1733358324931 already deleted, retry=false 2024-12-05T00:26:08,391 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2113c16e5528,37749,1733358324931 expired; onlineServers=0 2024-12-05T00:26:08,391 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '2113c16e5528,43963,1733358324824' ***** 2024-12-05T00:26:08,391 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-05T00:26:08,391 INFO [M:0;2113c16e5528:43963 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T00:26:08,391 INFO [M:0;2113c16e5528:43963 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T00:26:08,392 DEBUG [M:0;2113c16e5528:43963 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-05T00:26:08,392 DEBUG [M:0;2113c16e5528:43963 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-05T00:26:08,392 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
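[Editor's note, not part of the test output] The Close-WAL-Writer-0 records above ("Failed to recover lease, attempt=1 ... after 4002ms" and the later FileNotFoundException thrown from isFileClosed inside RecoverLeaseFSUtils) come from HBase polling HDFS lease recovery on the old WAL file. Below is a minimal sketch of that kind of polling using only public HDFS client calls (DistributedFileSystem.recoverLease and isFileClosed, the latter visible in the stack trace). The loop shape, timeout, and sleep interval are the editor's assumptions and this is not the actual RecoverLeaseFSUtils implementation; the NameNode address and WAL path are copied from the log purely for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoveryPollSketch {
  // Poll until the NameNode reports the file closed or the timeout expires.
  static boolean waitForClosed(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      // recoverLease() returns true once the lease is released and the file is
      // closed; isFileClosed() is the call that appears in the trace above.
      // Both throw FileNotFoundException if the path no longer exists.
      if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
        return true;
      }
      Thread.sleep(1000L); // retry interval, arbitrary for this sketch
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs =
        (DistributedFileSystem) new Path("hdfs://localhost:41383/").getFileSystem(conf);
    Path wal = new Path("/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/"
        + "2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307");
    System.out.println(waitForClosed(dfs, wal, 60_000L));
  }
}

In this particular run the earlier records show the same WAL had already been moved to oldWALs by the archiver, which is consistent with the "File does not exist" RemoteException that the isFileClosed() call reports above.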
2024-12-05T00:26:08,392 DEBUG [master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.large.0-1733358325233 {}] cleaner.HFileCleaner(306): Exit Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.large.0-1733358325233,5,FailOnTimeoutGroup] 2024-12-05T00:26:08,392 DEBUG [master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.small.0-1733358325233 {}] cleaner.HFileCleaner(306): Exit Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.small.0-1733358325233,5,FailOnTimeoutGroup] 2024-12-05T00:26:08,392 INFO [M:0;2113c16e5528:43963 {}] hbase.ChoreService(370): Chore service for: master/2113c16e5528:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-05T00:26:08,392 INFO [M:0;2113c16e5528:43963 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T00:26:08,392 DEBUG [M:0;2113c16e5528:43963 {}] master.HMaster(1795): Stopping service threads 2024-12-05T00:26:08,392 INFO [M:0;2113c16e5528:43963 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-05T00:26:08,392 INFO [M:0;2113c16e5528:43963 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T00:26:08,393 INFO [M:0;2113c16e5528:43963 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-05T00:26:08,393 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-05T00:26:08,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-05T00:26:08,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:08,393 DEBUG [M:0;2113c16e5528:43963 {}] zookeeper.ZKUtil(347): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-05T00:26:08,393 WARN [M:0;2113c16e5528:43963 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-05T00:26:08,394 INFO [M:0;2113c16e5528:43963 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/.lastflushedseqids 2024-12-05T00:26:08,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741897_1083 (size=130) 2024-12-05T00:26:08,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38197 is added to blk_1073741897_1083 (size=130) 2024-12-05T00:26:08,400 INFO [M:0;2113c16e5528:43963 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-05T00:26:08,400 INFO [M:0;2113c16e5528:43963 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-05T00:26:08,400 DEBUG [M:0;2113c16e5528:43963 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T00:26:08,400 INFO [M:0;2113c16e5528:43963 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:26:08,400 DEBUG [M:0;2113c16e5528:43963 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:26:08,401 DEBUG [M:0;2113c16e5528:43963 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 1 ms 2024-12-05T00:26:08,401 DEBUG [M:0;2113c16e5528:43963 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:26:08,401 INFO [M:0;2113c16e5528:43963 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-12-05T00:26:08,418 DEBUG [M:0;2113c16e5528:43963 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b2492ee6116442cc93ac8a3a37730f7c is 82, key is hbase:meta,,1/info:regioninfo/1733358325949/Put/seqid=0 2024-12-05T00:26:08,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38197 is added to blk_1073741898_1084 (size=5672) 2024-12-05T00:26:08,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741898_1084 (size=5672) 2024-12-05T00:26:08,424 INFO [M:0;2113c16e5528:43963 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b2492ee6116442cc93ac8a3a37730f7c 2024-12-05T00:26:08,447 DEBUG [M:0;2113c16e5528:43963 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6021dc3c156243db89130901f0ba505d is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733358326596/Put/seqid=0 2024-12-05T00:26:08,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38197 is added to blk_1073741899_1085 (size=6255) 2024-12-05T00:26:08,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741899_1085 (size=6255) 2024-12-05T00:26:08,453 INFO [M:0;2113c16e5528:43963 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6021dc3c156243db89130901f0ba505d 2024-12-05T00:26:08,458 INFO [M:0;2113c16e5528:43963 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 6021dc3c156243db89130901f0ba505d 2024-12-05T00:26:08,474 DEBUG [M:0;2113c16e5528:43963 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/001e668c4ee84c40b91ce7744fbb3ae2 is 69, key is 2113c16e5528,37749,1733358324931/rs:state/1733358325351/Put/seqid=0 2024-12-05T00:26:08,478 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741900_1086 (size=5224) 2024-12-05T00:26:08,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38197 is added to blk_1073741900_1086 (size=5224) 2024-12-05T00:26:08,479 INFO [M:0;2113c16e5528:43963 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/001e668c4ee84c40b91ce7744fbb3ae2 2024-12-05T00:26:08,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1018002f6bd0001, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:26:08,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1018002f6bd0001, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:26:08,490 INFO [RS:0;2113c16e5528:37749 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T00:26:08,490 INFO [RS:0;2113c16e5528:37749 {}] regionserver.HRegionServer(1031): Exiting; stopping=2113c16e5528,37749,1733358324931; zookeeper connection closed. 2024-12-05T00:26:08,491 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@f9fb3ff {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@f9fb3ff 2024-12-05T00:26:08,491 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-12-05T00:26:08,497 DEBUG [M:0;2113c16e5528:43963 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0abd3d4ed2404fc9989fab37797814e3 is 52, key is load_balancer_on/state:d/1733358326091/Put/seqid=0 2024-12-05T00:26:08,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741901_1087 (size=5056) 2024-12-05T00:26:08,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38197 is added to blk_1073741901_1087 (size=5056) 2024-12-05T00:26:08,502 INFO [M:0;2113c16e5528:43963 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0abd3d4ed2404fc9989fab37797814e3 2024-12-05T00:26:08,507 DEBUG [M:0;2113c16e5528:43963 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b2492ee6116442cc93ac8a3a37730f7c as hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b2492ee6116442cc93ac8a3a37730f7c 2024-12-05T00:26:08,512 INFO [M:0;2113c16e5528:43963 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b2492ee6116442cc93ac8a3a37730f7c, entries=8, sequenceid=60, filesize=5.5 K 2024-12-05T00:26:08,513 DEBUG [M:0;2113c16e5528:43963 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6021dc3c156243db89130901f0ba505d as hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6021dc3c156243db89130901f0ba505d 2024-12-05T00:26:08,518 INFO [M:0;2113c16e5528:43963 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 6021dc3c156243db89130901f0ba505d 2024-12-05T00:26:08,518 INFO [M:0;2113c16e5528:43963 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6021dc3c156243db89130901f0ba505d, entries=6, sequenceid=60, filesize=6.1 K 2024-12-05T00:26:08,519 DEBUG [M:0;2113c16e5528:43963 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/001e668c4ee84c40b91ce7744fbb3ae2 as hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/001e668c4ee84c40b91ce7744fbb3ae2 2024-12-05T00:26:08,523 INFO [M:0;2113c16e5528:43963 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/001e668c4ee84c40b91ce7744fbb3ae2, entries=2, sequenceid=60, filesize=5.1 K 2024-12-05T00:26:08,524 DEBUG [M:0;2113c16e5528:43963 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0abd3d4ed2404fc9989fab37797814e3 as hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0abd3d4ed2404fc9989fab37797814e3 2024-12-05T00:26:08,528 INFO [M:0;2113c16e5528:43963 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0abd3d4ed2404fc9989fab37797814e3, entries=1, sequenceid=60, filesize=4.9 K 2024-12-05T00:26:08,530 INFO [M:0;2113c16e5528:43963 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=60, compaction requested=false 2024-12-05T00:26:08,531 INFO [M:0;2113c16e5528:43963 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-05T00:26:08,532 DEBUG [M:0;2113c16e5528:43963 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733358368400Disabling compacts and flushes for region at 1733358368400Disabling writes for close at 1733358368401 (+1 ms)Obtaining lock to block concurrent updates at 1733358368401Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733358368401Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1733358368401Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733358368402 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733358368402Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733358368418 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733358368418Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733358368431 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733358368447 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733358368447Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733358368458 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733358368473 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733358368473Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733358368483 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733358368497 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733358368497Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7f4e34c7: reopening flushed file at 1733358368507 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a3691ed: reopening flushed file at 1733358368513 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@140e2a82: reopening flushed file at 1733358368518 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@690642ea: reopening flushed file at 1733358368523 (+5 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=60, compaction requested=false at 1733358368530 (+7 ms)Writing region close event to WAL at 1733358368531 (+1 ms)Closed at 1733358368531 2024-12-05T00:26:08,532 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:08,532 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:08,533 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:08,533 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:08,533 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:08,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38197 is added to blk_1073741877_1060 (size=1045) 2024-12-05T00:26:08,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741877_1060 (size=1045) 2024-12-05T00:26:08,535 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-05T00:26:08,535 INFO [M:0;2113c16e5528:43963 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-05T00:26:08,535 INFO [M:0;2113c16e5528:43963 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43963 2024-12-05T00:26:08,536 INFO [M:0;2113c16e5528:43963 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T00:26:08,639 INFO [M:0;2113c16e5528:43963 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T00:26:08,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:26:08,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43963-0x1018002f6bd0000, quorum=127.0.0.1:54477, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:26:08,641 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@c1d8e25{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:26:08,642 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5af75f98{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:26:08,642 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:26:08,642 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@23df32cf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:26:08,642 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16a2580d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/hadoop.log.dir/,STOPPED} 2024-12-05T00:26:08,643 WARN [BP-1914787219-172.17.0.2-1733358323877 heartbeating to localhost/127.0.0.1:41383 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T00:26:08,643 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-05T00:26:08,643 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T00:26:08,643 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@18b5a41 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1914787219-172.17.0.2-1733358323877:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:35205,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:41801 , LocalHost:localPort 2113c16e5528/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] 
at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-05T00:26:08,643 WARN [BP-1914787219-172.17.0.2-1733358323877 heartbeating to localhost/127.0.0.1:41383 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1914787219-172.17.0.2-1733358323877 (Datanode Uuid 62e5d8db-0dd5-4ee1-910d-f64b93970567) service to localhost/127.0.0.1:41383 2024-12-05T00:26:08,644 ERROR [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@18b5a41 {}] datanode.DataNode(1743): Cannot find BPOfferService for reporting block received for bpid=BP-1914787219-172.17.0.2-1733358323877 2024-12-05T00:26:08,644 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data3/current/BP-1914787219-172.17.0.2-1733358323877 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:26:08,645 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data4/current/BP-1914787219-172.17.0.2-1733358323877 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:26:08,645 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@18b5a41 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1914787219-172.17.0.2-1733358323877:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:35205,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1914787219-172.17.0.2-1733358323877 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:26:08,645 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T00:26:08,645 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@18b5a41 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1914787219-172.17.0.2-1733358323877:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:33219,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1914787219-172.17.0.2-1733358323877 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:26:08,645 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@18b5a41 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1914787219-172.17.0.2-1733358323877:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:35205,null,null], DatanodeInfoWithStorage[127.0.0.1:33219,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1914787219-172.17.0.2-1733358323877:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:35205,null,null], DatanodeInfoWithStorage[127.0.0.1:33219,null,null]] 2024-12-05T00:26:08,647 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3c5438f9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:26:08,648 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2e7a8425{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:26:08,648 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:26:08,648 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@51f59516{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:26:08,648 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@36d0b5ff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/hadoop.log.dir/,STOPPED} 2024-12-05T00:26:08,649 WARN [BP-1914787219-172.17.0.2-1733358323877 heartbeating to localhost/127.0.0.1:41383 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T00:26:08,649 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T00:26:08,649 WARN [BP-1914787219-172.17.0.2-1733358323877 heartbeating to localhost/127.0.0.1:41383 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1914787219-172.17.0.2-1733358323877 (Datanode Uuid 675b84f7-6e45-4dbb-ac2e-cec9da575f5c) service to localhost/127.0.0.1:41383 2024-12-05T00:26:08,649 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T00:26:08,650 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data5/current/BP-1914787219-172.17.0.2-1733358323877 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:26:08,650 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/cluster_d40ff393-c986-a4ef-b5d0-88d02549ef9a/data/data6/current/BP-1914787219-172.17.0.2-1733358323877 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:26:08,650 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T00:26:08,656 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7cd2a640{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-05T00:26:08,656 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@64b7b556{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:26:08,656 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:26:08,657 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ed3a961{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:26:08,657 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a488aac{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/hadoop.log.dir/,STOPPED} 2024-12-05T00:26:08,665 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-05T00:26:08,692 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-05T00:26:08,704 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=153 (was 78) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41383 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f66d0bf5e20.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:37323 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:41383 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41383 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41383 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41383 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41383 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41383 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41383 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: 
MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f66d0bf5e20.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41383 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:41383 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41383 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37323 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=452 (was 404) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=176 (was 277), ProcessCount=11 (was 11), AvailableMemoryMB=8866 (was 10028) 2024-12-05T00:26:08,713 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=153, OpenFileDescriptor=452, MaxFileDescriptor=1048576, SystemLoadAverage=176, ProcessCount=11, AvailableMemoryMB=8866 2024-12-05T00:26:08,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-05T00:26:08,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/hadoop.log.dir so I do NOT create it in target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379 2024-12-05T00:26:08,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7531cff9-2fe6-fdba-fd33-24a68d0721dc/hadoop.tmp.dir so I do NOT create it in target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379 2024-12-05T00:26:08,713 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/cluster_fb2be59a-c7c1-a812-1afa-4bf3219bf7b4, deleteOnExit=true 2024-12-05T00:26:08,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-05T00:26:08,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/test.cache.data in system properties and HBase conf 2024-12-05T00:26:08,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/hadoop.tmp.dir in system properties and HBase conf 2024-12-05T00:26:08,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/hadoop.log.dir in system properties and HBase conf 2024-12-05T00:26:08,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-05T00:26:08,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-05T00:26:08,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-05T00:26:08,714 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-05T00:26:08,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-05T00:26:08,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-05T00:26:08,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-05T00:26:08,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T00:26:08,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-05T00:26:08,715 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-05T00:26:08,715 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T00:26:08,715 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T00:26:08,715 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-05T00:26:08,715 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/nfs.dump.dir in system properties and HBase conf 2024-12-05T00:26:08,715 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/java.io.tmpdir in system properties and HBase conf 2024-12-05T00:26:08,715 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T00:26:08,715 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-05T00:26:08,715 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-05T00:26:08,728 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-05T00:26:08,741 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-05T00:26:08,756 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:08,756 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:08,756 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:08,757 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:08,757 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:08,758 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:08,760 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:08,761 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:08,761 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:08,763 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:08,830 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:26:08,836 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:26:08,837 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:26:08,838 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:26:08,838 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-05T00:26:08,838 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:26:08,839 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@27a49013{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:26:08,839 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@662aecf5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:26:08,954 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@347a2271{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/java.io.tmpdir/jetty-localhost-36917-hadoop-hdfs-3_4_1-tests_jar-_-any-17988947901639339976/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-05T00:26:08,954 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@464ea64c{HTTP/1.1, (http/1.1)}{localhost:36917} 2024-12-05T00:26:08,954 INFO [Time-limited test {}] server.Server(415): Started @149582ms 2024-12-05T00:26:08,967 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 
2024-12-05T00:26:09,040 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:26:09,043 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:26:09,044 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:26:09,044 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:26:09,044 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-05T00:26:09,045 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@165d0fad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:26:09,045 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d6c2bfb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:26:09,159 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@109832d2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/java.io.tmpdir/jetty-localhost-35305-hadoop-hdfs-3_4_1-tests_jar-_-any-3637120632784921945/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:26:09,160 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5fec76d{HTTP/1.1, (http/1.1)}{localhost:35305} 2024-12-05T00:26:09,160 INFO [Time-limited test {}] server.Server(415): Started @149787ms 2024-12-05T00:26:09,162 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T00:26:09,180 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-05T00:26:09,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:09,193 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:26:09,196 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:26:09,197 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:26:09,197 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:26:09,197 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T00:26:09,198 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3fd7563{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:26:09,198 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@471bebfc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:26:09,283 WARN [Thread-1183 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/cluster_fb2be59a-c7c1-a812-1afa-4bf3219bf7b4/data/data1/current/BP-309723592-172.17.0.2-1733358368776/current, will proceed with Du for space computation calculation, 2024-12-05T00:26:09,283 WARN [Thread-1184 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/cluster_fb2be59a-c7c1-a812-1afa-4bf3219bf7b4/data/data2/current/BP-309723592-172.17.0.2-1733358368776/current, will proceed with Du for space computation calculation, 2024-12-05T00:26:09,304 WARN [Thread-1162 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-05T00:26:09,307 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc22a335016aa5367 with lease ID 0x2f1498a11ac457db: Processing first storage report for DS-9be2a434-390b-4c40-8bf6-634cd8e7bb81 from datanode DatanodeRegistration(127.0.0.1:32793, datanodeUuid=cf735d57-64b3-4247-acfb-7d0dd400132b, infoPort=46273, infoSecurePort=0, ipcPort=43409, storageInfo=lv=-57;cid=testClusterID;nsid=1631785924;c=1733358368776) 2024-12-05T00:26:09,307 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc22a335016aa5367 with lease ID 0x2f1498a11ac457db: from storage DS-9be2a434-390b-4c40-8bf6-634cd8e7bb81 node DatanodeRegistration(127.0.0.1:32793, datanodeUuid=cf735d57-64b3-4247-acfb-7d0dd400132b, infoPort=46273, infoSecurePort=0, ipcPort=43409, storageInfo=lv=-57;cid=testClusterID;nsid=1631785924;c=1733358368776), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-05T00:26:09,307 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc22a335016aa5367 with lease ID 0x2f1498a11ac457db: Processing first storage report for DS-dff7ce02-e792-4af4-9cf4-28276243c9a4 from datanode DatanodeRegistration(127.0.0.1:32793, datanodeUuid=cf735d57-64b3-4247-acfb-7d0dd400132b, infoPort=46273, infoSecurePort=0, ipcPort=43409, storageInfo=lv=-57;cid=testClusterID;nsid=1631785924;c=1733358368776) 2024-12-05T00:26:09,307 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc22a335016aa5367 with lease ID 0x2f1498a11ac457db: from storage DS-dff7ce02-e792-4af4-9cf4-28276243c9a4 node DatanodeRegistration(127.0.0.1:32793, datanodeUuid=cf735d57-64b3-4247-acfb-7d0dd400132b, infoPort=46273, infoSecurePort=0, ipcPort=43409, storageInfo=lv=-57;cid=testClusterID;nsid=1631785924;c=1733358368776), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:26:09,329 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2835f29c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/java.io.tmpdir/jetty-localhost-44705-hadoop-hdfs-3_4_1-tests_jar-_-any-7834269145088595566/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:26:09,330 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@14a79ae9{HTTP/1.1, (http/1.1)}{localhost:44705} 2024-12-05T00:26:09,330 INFO [Time-limited test {}] server.Server(415): Started @149957ms 2024-12-05T00:26:09,331 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
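The block reports above come from a two-datanode mini HDFS cluster spinning up inside the test JVM (two DatanodeRegistrations, each with two storages, reporting to the in-process namenode). A minimal sketch of starting a comparable cluster with Hadoop's MiniDFSCluster test helper; the scratch directory is a placeholder and the builder options shown are common ones, not necessarily exactly what HBaseTestingUtil passes in this run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class TwoNodeMiniHdfs {
    public static void main(String[] args) throws Exception {
        String testDataDir = "/tmp/two-node-minidfs";               // placeholder scratch dir
        Configuration conf = new HdfsConfiguration();
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataDir); // keep data1..dataN under the test dir
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(2)   // two datanodes, as in the registrations logged above
            .build();
        try {
            cluster.waitActive();                    // wait until namenode and both datanodes are up
            FileSystem fs = cluster.getFileSystem();
            fs.mkdirs(new Path("/user/jenkins"));    // the cluster is now usable like any HDFS instance
        } finally {
            cluster.shutdown();
        }
    }
}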
2024-12-05T00:26:09,433 WARN [Thread-1209 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/cluster_fb2be59a-c7c1-a812-1afa-4bf3219bf7b4/data/data3/current/BP-309723592-172.17.0.2-1733358368776/current, will proceed with Du for space computation calculation, 2024-12-05T00:26:09,434 WARN [Thread-1210 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/cluster_fb2be59a-c7c1-a812-1afa-4bf3219bf7b4/data/data4/current/BP-309723592-172.17.0.2-1733358368776/current, will proceed with Du for space computation calculation, 2024-12-05T00:26:09,450 WARN [Thread-1198 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-05T00:26:09,452 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5a23961420e81988 with lease ID 0x2f1498a11ac457dc: Processing first storage report for DS-ec3f72de-df1b-4321-8c73-9d1f496e4794 from datanode DatanodeRegistration(127.0.0.1:34997, datanodeUuid=27bbf400-a92b-4219-99da-be3681e7f296, infoPort=34907, infoSecurePort=0, ipcPort=46275, storageInfo=lv=-57;cid=testClusterID;nsid=1631785924;c=1733358368776) 2024-12-05T00:26:09,452 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5a23961420e81988 with lease ID 0x2f1498a11ac457dc: from storage DS-ec3f72de-df1b-4321-8c73-9d1f496e4794 node DatanodeRegistration(127.0.0.1:34997, datanodeUuid=27bbf400-a92b-4219-99da-be3681e7f296, infoPort=34907, infoSecurePort=0, ipcPort=46275, storageInfo=lv=-57;cid=testClusterID;nsid=1631785924;c=1733358368776), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:26:09,452 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5a23961420e81988 with lease ID 0x2f1498a11ac457dc: Processing first storage report for DS-48e0e437-d4db-48aa-aa1c-9df4eed3dcf6 from datanode DatanodeRegistration(127.0.0.1:34997, datanodeUuid=27bbf400-a92b-4219-99da-be3681e7f296, infoPort=34907, infoSecurePort=0, ipcPort=46275, storageInfo=lv=-57;cid=testClusterID;nsid=1631785924;c=1733358368776) 2024-12-05T00:26:09,452 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5a23961420e81988 with lease ID 0x2f1498a11ac457dc: from storage DS-48e0e437-d4db-48aa-aa1c-9df4eed3dcf6 node DatanodeRegistration(127.0.0.1:34997, datanodeUuid=27bbf400-a92b-4219-99da-be3681e7f296, infoPort=34907, infoSecurePort=0, ipcPort=46275, storageInfo=lv=-57;cid=testClusterID;nsid=1631785924;c=1733358368776), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:26:09,454 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379 2024-12-05T00:26:09,456 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/cluster_fb2be59a-c7c1-a812-1afa-4bf3219bf7b4/zookeeper_0, clientPort=51550, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/cluster_fb2be59a-c7c1-a812-1afa-4bf3219bf7b4/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/cluster_fb2be59a-c7c1-a812-1afa-4bf3219bf7b4/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-05T00:26:09,457 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51550 2024-12-05T00:26:09,457 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:26:09,459 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:26:09,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34997 is added to blk_1073741825_1001 (size=7) 2024-12-05T00:26:09,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32793 is added to blk_1073741825_1001 (size=7) 2024-12-05T00:26:09,468 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83 with version=8 2024-12-05T00:26:09,468 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/hbase-staging 2024-12-05T00:26:09,470 INFO [Time-limited test {}] client.ConnectionUtils(128): master/2113c16e5528:0 server-side Connection retries=45 2024-12-05T00:26:09,470 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:26:09,470 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T00:26:09,470 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T00:26:09,470 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:26:09,470 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T00:26:09,470 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-05T00:26:09,470 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T00:26:09,471 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46237 2024-12-05T00:26:09,472 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46237 connecting to ZooKeeper ensemble=127.0.0.1:51550 2024-12-05T00:26:09,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:462370x0, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T00:26:09,479 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46237-0x1018003a53f0000 connected 2024-12-05T00:26:09,493 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:26:09,494 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:26:09,496 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:26:09,497 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83, hbase.cluster.distributed=false 2024-12-05T00:26:09,498 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T00:26:09,498 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46237 2024-12-05T00:26:09,499 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46237 2024-12-05T00:26:09,500 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46237 2024-12-05T00:26:09,501 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46237 2024-12-05T00:26:09,502 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46237 2024-12-05T00:26:09,517 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2113c16e5528:0 server-side Connection retries=45 2024-12-05T00:26:09,518 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:26:09,518 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T00:26:09,518 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T00:26:09,518 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:26:09,518 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T00:26:09,518 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T00:26:09,518 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T00:26:09,519 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34229 2024-12-05T00:26:09,520 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34229 connecting to ZooKeeper ensemble=127.0.0.1:51550 2024-12-05T00:26:09,520 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:26:09,522 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:26:09,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:342290x0, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T00:26:09,529 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:342290x0, quorum=127.0.0.1:51550, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:26:09,529 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34229-0x1018003a53f0001 connected 2024-12-05T00:26:09,529 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-05T00:26:09,530 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T00:26:09,531 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34229-0x1018003a53f0001, quorum=127.0.0.1:51550, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T00:26:09,532 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34229-0x1018003a53f0001, quorum=127.0.0.1:51550, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T00:26:09,535 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34229 2024-12-05T00:26:09,536 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34229 2024-12-05T00:26:09,536 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34229 2024-12-05T00:26:09,538 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34229 2024-12-05T00:26:09,540 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34229 2024-12-05T00:26:09,552 
DEBUG [M:0;2113c16e5528:46237 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;2113c16e5528:46237 2024-12-05T00:26:09,553 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/2113c16e5528,46237,1733358369470 2024-12-05T00:26:09,554 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:26:09,554 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34229-0x1018003a53f0001, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:26:09,555 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/2113c16e5528,46237,1733358369470 2024-12-05T00:26:09,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34229-0x1018003a53f0001, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T00:26:09,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34229-0x1018003a53f0001, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:09,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:09,557 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-05T00:26:09,557 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/2113c16e5528,46237,1733358369470 from backup master directory 2024-12-05T00:26:09,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/2113c16e5528,46237,1733358369470 2024-12-05T00:26:09,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34229-0x1018003a53f0001, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:26:09,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:26:09,558 WARN [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
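Several of the entries above log "Set watcher on znode that does not yet exist" followed later by a NodeCreated event for /hbase/master: the watch is registered before the node is published, so the creation is observed the moment the active master writes it. A minimal sketch of that pattern with the plain ZooKeeper client rather than HBase's ZKWatcher/ZKUtil wrappers; the connect string and path simply mirror the log:

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class WatchForMasterZNode {
    public static void main(String[] args) throws Exception {
        CountDownLatch created = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:51550", 30_000, event -> { });
        Watcher watcher = (WatchedEvent event) -> {
            if (event.getType() == Watcher.Event.EventType.NodeCreated
                && "/hbase/master".equals(event.getPath())) {
                created.countDown();   // a master has published itself
            }
        };
        // exists() registers the watch even when the znode is absent and simply returns null.
        if (zk.exists("/hbase/master", watcher) != null) {
            created.countDown();       // already present
        }
        created.await();               // released by the NodeCreated event seen in the log
        zk.close();
    }
}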
2024-12-05T00:26:09,558 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=2113c16e5528,46237,1733358369470 2024-12-05T00:26:09,563 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/hbase.id] with ID: ecd2000e-b027-4117-9afd-98027fb2d477 2024-12-05T00:26:09,563 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/.tmp/hbase.id 2024-12-05T00:26:09,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32793 is added to blk_1073741826_1002 (size=42) 2024-12-05T00:26:09,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34997 is added to blk_1073741826_1002 (size=42) 2024-12-05T00:26:09,571 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/.tmp/hbase.id]:[hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/hbase.id] 2024-12-05T00:26:09,583 INFO [master/2113c16e5528:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:26:09,583 INFO [master/2113c16e5528:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-05T00:26:09,584 INFO [master/2113c16e5528:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
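The hbase.id entries above describe a write-to-temp-then-rename publish: the cluster ID is first written under .tmp/ and only renamed to its final path once the contents are complete, so readers never see a partial file. The production logic lives in HBase's FSUtils; the following is only an illustrative sketch of the same pattern against the Hadoop FileSystem API, with placeholder paths:

import java.nio.charset.StandardCharsets;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PublishClusterId {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();    // picks up fs.defaultFS from the classpath config
        FileSystem fs = FileSystem.get(conf);
        Path rootDir = new Path("/user/jenkins/test-data/example-root");  // placeholder rootdir
        fs.mkdirs(rootDir);
        Path tmp = new Path(rootDir, ".tmp/hbase.id");
        Path dst = new Path(rootDir, "hbase.id");

        String clusterId = UUID.randomUUID().toString();
        try (FSDataOutputStream out = fs.create(tmp, true)) {   // overwrite any stale temp file
            out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        if (!fs.rename(tmp, dst)) {                             // publish by rename
            throw new java.io.IOException("Could not move " + tmp + " to " + dst);
        }
    }
}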
2024-12-05T00:26:09,588 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34229-0x1018003a53f0001, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:09,588 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:09,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32793 is added to blk_1073741827_1003 (size=196) 2024-12-05T00:26:09,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34997 is added to blk_1073741827_1003 (size=196) 2024-12-05T00:26:09,595 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T00:26:09,596 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-05T00:26:09,596 INFO [master/2113c16e5528:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T00:26:09,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34997 is added to blk_1073741828_1004 (size=1189) 2024-12-05T00:26:09,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32793 is added to blk_1073741828_1004 (size=1189) 2024-12-05T00:26:09,603 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/data/master/store 2024-12-05T00:26:09,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32793 is added to blk_1073741829_1005 (size=34) 2024-12-05T00:26:09,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34997 is added to blk_1073741829_1005 (size=34) 2024-12-05T00:26:09,610 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:26:09,610 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T00:26:09,610 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:26:09,610 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:26:09,610 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T00:26:09,610 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:26:09,610 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
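The descriptor printed above for the local master:store region lists four column families (info, proc, rs, state) with per-family versions, block encoding, bloom filter, in-memory flag, and block size. As a sketch only, a descriptor with the same kind of settings can be assembled through the public HBase client builders; the table name here is a placeholder, and only the 'info' and 'proc' families are shown since 'rs' and 'state' follow the same shape as 'proc':

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class LocalStoreLikeDescriptor {
    public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                   // VERSIONS => '3'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8 * 1024)                              // 8 KB blocks
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)
                .setBloomFilterType(BloomType.ROW)
                .setBlocksize(64 * 1024)                             // 64 KB blocks
                .build())
            .build();
    }
}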
2024-12-05T00:26:09,610 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733358369610Disabling compacts and flushes for region at 1733358369610Disabling writes for close at 1733358369610Writing region close event to WAL at 1733358369610Closed at 1733358369610 2024-12-05T00:26:09,611 WARN [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/data/master/store/.initializing 2024-12-05T00:26:09,611 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/WALs/2113c16e5528,46237,1733358369470 2024-12-05T00:26:09,614 INFO [master/2113c16e5528:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2113c16e5528%2C46237%2C1733358369470, suffix=, logDir=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/WALs/2113c16e5528,46237,1733358369470, archiveDir=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/oldWALs, maxLogs=10 2024-12-05T00:26:09,614 INFO [master/2113c16e5528:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C46237%2C1733358369470.1733358369614 2024-12-05T00:26:09,618 INFO [master/2113c16e5528:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/WALs/2113c16e5528,46237,1733358369470/2113c16e5528%2C46237%2C1733358369470.1733358369614 2024-12-05T00:26:09,621 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34907:34907),(127.0.0.1/127.0.0.1:46273:46273)] 2024-12-05T00:26:09,621 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-05T00:26:09,622 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:26:09,622 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:26:09,622 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:26:09,623 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:26:09,624 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-05T00:26:09,624 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:26:09,625 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:26:09,625 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:26:09,626 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-05T00:26:09,626 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:26:09,627 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:26:09,627 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:26:09,628 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-05T00:26:09,628 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:26:09,628 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:26:09,628 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:26:09,629 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-05T00:26:09,629 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:26:09,630 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:26:09,630 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:26:09,631 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:26:09,631 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:26:09,632 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:26:09,632 DEBUG [master/2113c16e5528:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:26:09,633 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T00:26:09,634 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:26:09,636 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T00:26:09,637 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=697583, jitterRate=-0.11297765374183655}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T00:26:09,637 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733358369622Initializing all the Stores at 1733358369623 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358369623Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358369623Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358369623Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358369623Cleaning up temporary data from old regions at 1733358369632 (+9 ms)Region opened successfully at 1733358369637 (+5 ms) 2024-12-05T00:26:09,637 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-05T00:26:09,641 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@313fe7cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2113c16e5528/172.17.0.2:0 2024-12-05T00:26:09,642 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-05T00:26:09,642 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-05T00:26:09,642 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-05T00:26:09,643 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-05T00:26:09,643 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-05T00:26:09,643 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-05T00:26:09,643 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-05T00:26:09,645 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-05T00:26:09,646 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-05T00:26:09,647 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-05T00:26:09,647 INFO [master/2113c16e5528:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-05T00:26:09,648 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-05T00:26:09,649 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-05T00:26:09,650 INFO [master/2113c16e5528:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-05T00:26:09,650 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-05T00:26:09,651 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-05T00:26:09,652 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-05T00:26:09,658 DEBUG 
[master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-05T00:26:09,660 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-05T00:26:09,662 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-05T00:26:09,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T00:26:09,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34229-0x1018003a53f0001, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T00:26:09,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:09,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34229-0x1018003a53f0001, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:09,664 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=2113c16e5528,46237,1733358369470, sessionid=0x1018003a53f0000, setting cluster-up flag (Was=false) 2024-12-05T00:26:09,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34229-0x1018003a53f0001, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:09,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:09,672 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-05T00:26:09,673 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2113c16e5528,46237,1733358369470 2024-12-05T00:26:09,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:09,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34229-0x1018003a53f0001, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:09,685 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-05T00:26:09,687 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2113c16e5528,46237,1733358369470 2024-12-05T00:26:09,688 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-05T00:26:09,689 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-05T00:26:09,690 INFO [master/2113c16e5528:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-05T00:26:09,690 INFO [master/2113c16e5528:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-05T00:26:09,690 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 2113c16e5528,46237,1733358369470 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-05T00:26:09,691 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:26:09,691 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:26:09,691 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:26:09,691 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:26:09,691 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/2113c16e5528:0, corePoolSize=10, maxPoolSize=10 2024-12-05T00:26:09,691 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:26:09,691 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/2113c16e5528:0, corePoolSize=2, maxPoolSize=2 2024-12-05T00:26:09,692 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/2113c16e5528:0, corePoolSize=1, 
maxPoolSize=1 2024-12-05T00:26:09,693 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:26:09,694 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-05T00:26:09,694 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:26:09,694 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-05T00:26:09,697 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733358399697 2024-12-05T00:26:09,697 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-05T00:26:09,697 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-05T00:26:09,697 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-05T00:26:09,697 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-05T00:26:09,697 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-05T00:26:09,697 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-05T00:26:09,697 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:09,697 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-05T00:26:09,698 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-05T00:26:09,698 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-05T00:26:09,698 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-05T00:26:09,698 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-05T00:26:09,698 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.large.0-1733358369698,5,FailOnTimeoutGroup] 2024-12-05T00:26:09,699 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.small.0-1733358369699,5,FailOnTimeoutGroup] 2024-12-05T00:26:09,699 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:09,699 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-05T00:26:09,699 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:09,699 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
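[editor's note] The chore entries above (LogsCleaner, HFileCleaner, ReplicationBarrierCleaner, SnapshotCleaner) come from the master scheduling periodic cleaners on its ChoreService. Purely as an illustration of that pattern, here is a minimal sketch against the public ScheduledChore/ChoreService API; the constructor and method shapes are recalled from memory and may differ between HBase versions, and the chore name and period are invented for the demo.

```java
// Minimal sketch of the ScheduledChore/ChoreService pattern the log entries above reflect.
// API shapes are assumptions recalled from the public HBase API, not taken from this log.
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreExample {
  public static void main(String[] args) throws InterruptedException {
    Stoppable stopper = new Stoppable() {          // trivial stopper, just for the demo
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    // "Chore ScheduledChore name=LogsCleaner, period=600000" corresponds to a chore scheduled
    // with a 600000 ms period; a 1000 ms period is used here so the demo finishes quickly.
    ScheduledChore demoChore = new ScheduledChore("DemoCleaner", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("periodic cleanup pass"); // real cleaners delete old WALs/HFiles here
      }
    };

    ChoreService choreService = new ChoreService("demo");
    choreService.scheduleChore(demoChore);           // produces the "... is enabled." style log line
    Thread.sleep(3000);
    choreService.shutdown();
  }
}
```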
2024-12-05T00:26:09,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34997 is added to blk_1073741831_1007 (size=1321) 2024-12-05T00:26:09,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32793 is added to blk_1073741831_1007 (size=1321) 2024-12-05T00:26:09,702 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-05T00:26:09,702 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83 2024-12-05T00:26:09,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32793 is added to blk_1073741832_1008 (size=32) 2024-12-05T00:26:09,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34997 is added to blk_1073741832_1008 (size=32) 2024-12-05T00:26:09,709 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:26:09,710 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T00:26:09,711 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T00:26:09,711 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:26:09,712 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:26:09,712 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T00:26:09,713 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T00:26:09,713 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:26:09,713 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:26:09,713 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T00:26:09,714 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T00:26:09,714 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:26:09,715 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:26:09,715 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T00:26:09,716 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T00:26:09,716 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:26:09,716 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:26:09,716 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T00:26:09,717 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/hbase/meta/1588230740 2024-12-05T00:26:09,717 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/hbase/meta/1588230740 2024-12-05T00:26:09,718 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T00:26:09,718 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T00:26:09,719 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-05T00:26:09,720 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T00:26:09,722 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T00:26:09,722 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=847048, jitterRate=0.07707764208316803}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-05T00:26:09,723 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733358369709Initializing all the Stores at 1733358369709Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358369710 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358369710Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358369710Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358369710Cleaning up temporary data from old regions at 1733358369718 (+8 ms)Region opened successfully at 1733358369723 (+5 ms) 2024-12-05T00:26:09,723 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T00:26:09,723 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T00:26:09,723 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T00:26:09,723 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T00:26:09,723 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T00:26:09,724 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T00:26:09,725 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733358369723Disabling compacts and flushes for region at 1733358369723Disabling writes for close at 1733358369723Writing region close 
event to WAL at 1733358369724 (+1 ms)Closed at 1733358369724 2024-12-05T00:26:09,726 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:26:09,726 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-05T00:26:09,726 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-05T00:26:09,727 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T00:26:09,728 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-05T00:26:09,742 INFO [RS:0;2113c16e5528:34229 {}] regionserver.HRegionServer(746): ClusterId : ecd2000e-b027-4117-9afd-98027fb2d477 2024-12-05T00:26:09,742 DEBUG [RS:0;2113c16e5528:34229 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T00:26:09,745 DEBUG [RS:0;2113c16e5528:34229 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T00:26:09,745 DEBUG [RS:0;2113c16e5528:34229 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T00:26:09,747 DEBUG [RS:0;2113c16e5528:34229 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T00:26:09,747 DEBUG [RS:0;2113c16e5528:34229 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f4d142b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2113c16e5528/172.17.0.2:0 2024-12-05T00:26:09,759 DEBUG [RS:0;2113c16e5528:34229 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;2113c16e5528:34229 2024-12-05T00:26:09,759 INFO [RS:0;2113c16e5528:34229 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T00:26:09,759 INFO [RS:0;2113c16e5528:34229 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T00:26:09,759 DEBUG [RS:0;2113c16e5528:34229 {}] regionserver.HRegionServer(832): About to register with Master. 
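[editor's note] The AbstractRpcClient line above reports connectTO=10000, readTO=20000 and writeTO=60000. The sketch below shows how such client socket timeouts are usually tuned through configuration; the three property keys are an assumption recalled from memory (they are not printed in this log) and should be checked against the HBase version under test.

```java
// Hedged sketch: adjusting the RPC client socket timeouts reported in the log entry above.
// The property keys are assumptions and may differ by HBase version.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcClientTimeouts {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.ipc.client.socket.timeout.connect", 10000); // connectTO in the log
    conf.setInt("hbase.ipc.client.socket.timeout.read", 20000);    // readTO in the log
    conf.setInt("hbase.ipc.client.socket.timeout.write", 60000);   // writeTO in the log
    System.out.println(conf.getInt("hbase.ipc.client.socket.timeout.read", -1));
  }
}
```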
2024-12-05T00:26:09,760 INFO [RS:0;2113c16e5528:34229 {}] regionserver.HRegionServer(2659): reportForDuty to master=2113c16e5528,46237,1733358369470 with port=34229, startcode=1733358369517 2024-12-05T00:26:09,760 DEBUG [RS:0;2113c16e5528:34229 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T00:26:09,762 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56471, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T00:26:09,763 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46237 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2113c16e5528,34229,1733358369517 2024-12-05T00:26:09,763 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46237 {}] master.ServerManager(517): Registering regionserver=2113c16e5528,34229,1733358369517 2024-12-05T00:26:09,764 DEBUG [RS:0;2113c16e5528:34229 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83 2024-12-05T00:26:09,765 DEBUG [RS:0;2113c16e5528:34229 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39785 2024-12-05T00:26:09,765 DEBUG [RS:0;2113c16e5528:34229 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T00:26:09,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T00:26:09,767 DEBUG [RS:0;2113c16e5528:34229 {}] zookeeper.ZKUtil(111): regionserver:34229-0x1018003a53f0001, quorum=127.0.0.1:51550, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2113c16e5528,34229,1733358369517 2024-12-05T00:26:09,767 WARN [RS:0;2113c16e5528:34229 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T00:26:09,767 INFO [RS:0;2113c16e5528:34229 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T00:26:09,767 DEBUG [RS:0;2113c16e5528:34229 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517 2024-12-05T00:26:09,767 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2113c16e5528,34229,1733358369517] 2024-12-05T00:26:09,770 INFO [RS:0;2113c16e5528:34229 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T00:26:09,772 INFO [RS:0;2113c16e5528:34229 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T00:26:09,772 INFO [RS:0;2113c16e5528:34229 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T00:26:09,772 INFO [RS:0;2113c16e5528:34229 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
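[editor's note] The PressureAwareCompactionThroughputController line above reports a 100 MB/s upper and 50 MB/s lower compaction throughput bound. Assuming the usual hbase.hstore.compaction.throughput.* keys are what those numbers come from, a minimal sketch of setting such bounds follows; verify the key names against the running version before relying on them.

```java
// Sketch only: where the "higher bound: 100.00 MB/second, lower bound 50.00 MB/second"
// figures would typically be configured. Key names are assumptions, not taken from this log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // 100 MB/s upper and 50 MB/s lower bound, matching the values reported in the log.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    System.out.println(conf.get("hbase.hstore.compaction.throughput.higher.bound"));
  }
}
```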
2024-12-05T00:26:09,773 INFO [RS:0;2113c16e5528:34229 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T00:26:09,773 INFO [RS:0;2113c16e5528:34229 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T00:26:09,774 INFO [RS:0;2113c16e5528:34229 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:09,774 DEBUG [RS:0;2113c16e5528:34229 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:26:09,774 DEBUG [RS:0;2113c16e5528:34229 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:26:09,774 DEBUG [RS:0;2113c16e5528:34229 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:26:09,774 DEBUG [RS:0;2113c16e5528:34229 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:26:09,774 DEBUG [RS:0;2113c16e5528:34229 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:26:09,774 DEBUG [RS:0;2113c16e5528:34229 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2113c16e5528:0, corePoolSize=2, maxPoolSize=2 2024-12-05T00:26:09,774 DEBUG [RS:0;2113c16e5528:34229 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:26:09,774 DEBUG [RS:0;2113c16e5528:34229 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:26:09,774 DEBUG [RS:0;2113c16e5528:34229 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:26:09,775 DEBUG [RS:0;2113c16e5528:34229 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:26:09,775 DEBUG [RS:0;2113c16e5528:34229 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:26:09,775 DEBUG [RS:0;2113c16e5528:34229 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:26:09,775 DEBUG [RS:0;2113c16e5528:34229 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:26:09,775 DEBUG [RS:0;2113c16e5528:34229 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:26:09,775 INFO [RS:0;2113c16e5528:34229 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-05T00:26:09,776 INFO [RS:0;2113c16e5528:34229 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:09,776 INFO [RS:0;2113c16e5528:34229 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:09,776 INFO [RS:0;2113c16e5528:34229 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:09,776 INFO [RS:0;2113c16e5528:34229 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:09,776 INFO [RS:0;2113c16e5528:34229 {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,34229,1733358369517-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T00:26:09,791 INFO [RS:0;2113c16e5528:34229 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T00:26:09,791 INFO [RS:0;2113c16e5528:34229 {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,34229,1733358369517-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:09,791 INFO [RS:0;2113c16e5528:34229 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:09,791 INFO [RS:0;2113c16e5528:34229 {}] regionserver.Replication(171): 2113c16e5528,34229,1733358369517 started 2024-12-05T00:26:09,806 INFO [RS:0;2113c16e5528:34229 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:09,806 INFO [RS:0;2113c16e5528:34229 {}] regionserver.HRegionServer(1482): Serving as 2113c16e5528,34229,1733358369517, RpcServer on 2113c16e5528/172.17.0.2:34229, sessionid=0x1018003a53f0001 2024-12-05T00:26:09,806 DEBUG [RS:0;2113c16e5528:34229 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T00:26:09,806 DEBUG [RS:0;2113c16e5528:34229 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2113c16e5528,34229,1733358369517 2024-12-05T00:26:09,806 DEBUG [RS:0;2113c16e5528:34229 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2113c16e5528,34229,1733358369517' 2024-12-05T00:26:09,806 DEBUG [RS:0;2113c16e5528:34229 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T00:26:09,807 DEBUG [RS:0;2113c16e5528:34229 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T00:26:09,807 DEBUG [RS:0;2113c16e5528:34229 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T00:26:09,807 DEBUG [RS:0;2113c16e5528:34229 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T00:26:09,807 DEBUG [RS:0;2113c16e5528:34229 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2113c16e5528,34229,1733358369517 2024-12-05T00:26:09,807 DEBUG [RS:0;2113c16e5528:34229 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2113c16e5528,34229,1733358369517' 2024-12-05T00:26:09,807 DEBUG [RS:0;2113c16e5528:34229 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T00:26:09,808 DEBUG 
[RS:0;2113c16e5528:34229 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T00:26:09,808 DEBUG [RS:0;2113c16e5528:34229 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T00:26:09,808 INFO [RS:0;2113c16e5528:34229 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T00:26:09,808 INFO [RS:0;2113c16e5528:34229 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T00:26:09,879 WARN [2113c16e5528:46237 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-05T00:26:09,910 INFO [RS:0;2113c16e5528:34229 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2113c16e5528%2C34229%2C1733358369517, suffix=, logDir=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517, archiveDir=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/oldWALs, maxLogs=32 2024-12-05T00:26:09,911 INFO [RS:0;2113c16e5528:34229 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C34229%2C1733358369517.1733358369911 2024-12-05T00:26:09,917 INFO [RS:0;2113c16e5528:34229 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358369911 2024-12-05T00:26:09,918 DEBUG [RS:0;2113c16e5528:34229 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46273:46273),(127.0.0.1/127.0.0.1:34907:34907)] 2024-12-05T00:26:10,129 DEBUG [2113c16e5528:46237 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-05T00:26:10,129 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=2113c16e5528,34229,1733358369517 2024-12-05T00:26:10,131 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2113c16e5528,34229,1733358369517, state=OPENING 2024-12-05T00:26:10,133 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-05T00:26:10,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:10,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34229-0x1018003a53f0001, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:10,135 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T00:26:10,135 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:26:10,135 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, 
server=2113c16e5528,34229,1733358369517}] 2024-12-05T00:26:10,135 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-05T00:26:10,180 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-12-05T00:26:10,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-12-05T00:26:10,287 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-05T00:26:10,289 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39875, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-05T00:26:10,293 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-12-05T00:26:10,293 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-05T00:26:10,295 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2113c16e5528%2C34229%2C1733358369517.meta, suffix=.meta, logDir=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517, archiveDir=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/oldWALs, maxLogs=32
2024-12-05T00:26:10,295 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C34229%2C1733358369517.meta.1733358370295.meta
2024-12-05T00:26:10,300 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.meta.1733358370295.meta
2024-12-05T00:26:10,304 DEBUG
[RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46273:46273),(127.0.0.1/127.0.0.1:34907:34907)] 2024-12-05T00:26:10,308 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-05T00:26:10,308 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-05T00:26:10,308 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-05T00:26:10,309 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-05T00:26:10,309 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-05T00:26:10,309 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:26:10,309 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-05T00:26:10,309 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-05T00:26:10,310 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T00:26:10,311 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T00:26:10,311 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:26:10,311 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:26:10,312 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T00:26:10,312 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T00:26:10,313 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:26:10,313 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:26:10,313 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T00:26:10,314 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T00:26:10,314 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:26:10,315 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:26:10,315 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T00:26:10,315 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T00:26:10,316 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:26:10,316 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:26:10,316 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T00:26:10,317 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/hbase/meta/1588230740 2024-12-05T00:26:10,318 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/hbase/meta/1588230740 2024-12-05T00:26:10,319 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T00:26:10,319 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T00:26:10,319 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
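[editor's note] The FlushLargeStoresPolicy message above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on hbase:meta, so the flush lower bound falls back to the memstore flush size divided by the number of families (the 16.0 M figure). As a minimal sketch, using a made-up table name and the client-side builder API as recalled from memory, the key could be supplied on a table descriptor like this:

```java
// Hedged sketch of setting the per-table flush lower bound named in the log message above.
// The table name is hypothetical; only the property key is taken from the log.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushLowerBoundExample {
  public static void main(String[] args) {
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo_table"))                 // hypothetical table
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        // Without this key, FlushLargeStoresPolicy falls back to
        // memStoreFlushSize / numberOfFamilies, i.e. the "(16.0 M)" in the log.
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                  String.valueOf(32L * 1024 * 1024))
        .build();
    System.out.println(td);
  }
}
```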
2024-12-05T00:26:10,321 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T00:26:10,322 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=812841, jitterRate=0.03358176350593567}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-05T00:26:10,322 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-05T00:26:10,322 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733358370309Writing region info on filesystem at 1733358370309Initializing all the Stores at 1733358370310 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358370310Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358370310Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358370310Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358370310Cleaning up temporary data from old regions at 1733358370319 (+9 ms)Running coprocessor post-open hooks at 1733358370322 (+3 ms)Region opened successfully at 1733358370322 2024-12-05T00:26:10,323 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733358370287 2024-12-05T00:26:10,326 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-05T00:26:10,326 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-05T00:26:10,326 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=2113c16e5528,34229,1733358369517 2024-12-05T00:26:10,328 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2113c16e5528,34229,1733358369517, state=OPEN 2024-12-05T00:26:10,332 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T00:26:10,332 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34229-0x1018003a53f0001, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T00:26:10,332 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:26:10,332 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:26:10,332 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=2113c16e5528,34229,1733358369517 2024-12-05T00:26:10,335 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-05T00:26:10,335 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=2113c16e5528,34229,1733358369517 in 197 msec 2024-12-05T00:26:10,338 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-05T00:26:10,338 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 609 msec 2024-12-05T00:26:10,339 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:26:10,339 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-05T00:26:10,340 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T00:26:10,340 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,34229,1733358369517, seqNum=-1] 2024-12-05T00:26:10,341 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T00:26:10,342 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41529, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T00:26:10,348 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 657 msec 2024-12-05T00:26:10,348 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733358370348, completionTime=-1 2024-12-05T00:26:10,348 INFO 
[master/2113c16e5528:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-05T00:26:10,348 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-05T00:26:10,350 INFO [master/2113c16e5528:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-05T00:26:10,350 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733358430350 2024-12-05T00:26:10,350 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733358490350 2024-12-05T00:26:10,350 INFO [master/2113c16e5528:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-05T00:26:10,350 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,46237,1733358369470-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:10,350 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,46237,1733358369470-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:10,350 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,46237,1733358369470-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:10,351 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-2113c16e5528:46237, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:10,351 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:10,351 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:10,353 DEBUG [master/2113c16e5528:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-05T00:26:10,354 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.795sec 2024-12-05T00:26:10,354 INFO [master/2113c16e5528:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-05T00:26:10,354 INFO [master/2113c16e5528:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-05T00:26:10,354 INFO [master/2113c16e5528:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-05T00:26:10,354 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-05T00:26:10,354 INFO [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-05T00:26:10,354 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,46237,1733358369470-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T00:26:10,354 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,46237,1733358369470-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-05T00:26:10,357 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-05T00:26:10,357 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-05T00:26:10,357 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,46237,1733358369470-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:10,443 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68c19d09, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:26:10,443 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,46237,-1 for getting cluster id 2024-12-05T00:26:10,443 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T00:26:10,445 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ecd2000e-b027-4117-9afd-98027fb2d477' 2024-12-05T00:26:10,446 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T00:26:10,446 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ecd2000e-b027-4117-9afd-98027fb2d477" 2024-12-05T00:26:10,446 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51e74c02, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:26:10,446 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,46237,-1] 2024-12-05T00:26:10,446 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T00:26:10,447 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:26:10,448 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51526, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T00:26:10,449 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@247f04ef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:26:10,450 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T00:26:10,451 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,34229,1733358369517, seqNum=-1] 2024-12-05T00:26:10,451 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T00:26:10,452 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35012, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T00:26:10,454 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=2113c16e5528,46237,1733358369470 2024-12-05T00:26:10,454 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:26:10,457 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-05T00:26:10,457 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-12-05T00:26:10,458 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-12-05T00:26:10,458 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-05T00:26:10,459 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 2113c16e5528,46237,1733358369470 2024-12-05T00:26:10,459 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1f5796d1 2024-12-05T00:26:10,459 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-05T00:26:10,461 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51542, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-05T00:26:10,461 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46237 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-05T00:26:10,461 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46237 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
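The two TableDescriptorChecker warnings above show that the test runs with a deliberately tiny "hbase.hregion.max.filesize" (786432 bytes) and "hbase.hregion.memstore.flush.size" (8192 bytes) so that flushes, rolls and splits happen quickly. The log does not show whether these were set on the Configuration or on the table descriptor; below is a minimal sketch, assuming a plain Configuration is used before the mini cluster is started, using exactly the keys and values from the warnings:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class TinyRegionSettings {
      // Returns a Configuration with the small region sizes that trigger the
      // TableDescriptorChecker warnings seen in the log.
      public static Configuration tinyRegionConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.max.filesize", 786432L);      // ~768 KB store files
        conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // 8 KB memstore flushes
        return conf;
      }
    }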
2024-12-05T00:26:10,462 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46237 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T00:26:10,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46237 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-12-05T00:26:10,465 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T00:26:10,465 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:26:10,465 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46237 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-12-05T00:26:10,466 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T00:26:10,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46237 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T00:26:10,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32793 is added to blk_1073741835_1011 (size=395) 2024-12-05T00:26:10,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34997 is added to blk_1073741835_1011 (size=395) 2024-12-05T00:26:10,475 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => cf04f666e21781f1c981b866e52fbc58, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733358370461.cf04f666e21781f1c981b866e52fbc58.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83 2024-12-05T00:26:10,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32793 is added to blk_1073741836_1012 (size=78) 2024-12-05T00:26:10,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34997 is added to blk_1073741836_1012 (size=78) 2024-12-05T00:26:10,481 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733358370461.cf04f666e21781f1c981b866e52fbc58.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:26:10,481 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing cf04f666e21781f1c981b866e52fbc58, disabling compactions & flushes 2024-12-05T00:26:10,481 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733358370461.cf04f666e21781f1c981b866e52fbc58. 2024-12-05T00:26:10,481 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733358370461.cf04f666e21781f1c981b866e52fbc58. 2024-12-05T00:26:10,481 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733358370461.cf04f666e21781f1c981b866e52fbc58. after waiting 0 ms 2024-12-05T00:26:10,481 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733358370461.cf04f666e21781f1c981b866e52fbc58. 2024-12-05T00:26:10,481 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733358370461.cf04f666e21781f1c981b866e52fbc58. 2024-12-05T00:26:10,481 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for cf04f666e21781f1c981b866e52fbc58: Waiting for close lock at 1733358370481Disabling compacts and flushes for region at 1733358370481Disabling writes for close at 1733358370481Writing region close event to WAL at 1733358370481Closed at 1733358370481 2024-12-05T00:26:10,483 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T00:26:10,483 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1733358370461.cf04f666e21781f1c981b866e52fbc58.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1733358370483"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733358370483"}]},"ts":"1733358370483"} 2024-12-05T00:26:10,486 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
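The create request logged for pid=4 carries a single 'info' family with VERSIONS => '1', BLOOMFILTER => 'ROW' and a 64 KB block size. A minimal client-side sketch that would produce an equivalent descriptor, assuming a running cluster reachable through the default client configuration; the table and family names are taken from the log, everything else (class name, connection setup) is illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Mirrors the column-family attributes shown in the HMaster create log record.
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)                  // VERSIONS => '1'
                  .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
                  .setBlocksize(64 * 1024)            // BLOCKSIZE => 64 KB
                  .build())
              .build());
        }
      }
    }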
2024-12-05T00:26:10,487 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T00:26:10,487 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733358370487"}]},"ts":"1733358370487"} 2024-12-05T00:26:10,489 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-12-05T00:26:10,489 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=cf04f666e21781f1c981b866e52fbc58, ASSIGN}] 2024-12-05T00:26:10,491 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=cf04f666e21781f1c981b866e52fbc58, ASSIGN 2024-12-05T00:26:10,492 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=cf04f666e21781f1c981b866e52fbc58, ASSIGN; state=OFFLINE, location=2113c16e5528,34229,1733358369517; forceNewPlan=false, retain=false 2024-12-05T00:26:10,643 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=cf04f666e21781f1c981b866e52fbc58, regionState=OPENING, regionLocation=2113c16e5528,34229,1733358369517 2024-12-05T00:26:10,645 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=cf04f666e21781f1c981b866e52fbc58, ASSIGN because future has completed 2024-12-05T00:26:10,646 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure cf04f666e21781f1c981b866e52fbc58, server=2113c16e5528,34229,1733358369517}] 2024-12-05T00:26:10,803 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1733358370461.cf04f666e21781f1c981b866e52fbc58. 
2024-12-05T00:26:10,803 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => cf04f666e21781f1c981b866e52fbc58, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733358370461.cf04f666e21781f1c981b866e52fbc58.', STARTKEY => '', ENDKEY => ''} 2024-12-05T00:26:10,804 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart cf04f666e21781f1c981b866e52fbc58 2024-12-05T00:26:10,804 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733358370461.cf04f666e21781f1c981b866e52fbc58.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:26:10,804 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for cf04f666e21781f1c981b866e52fbc58 2024-12-05T00:26:10,804 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for cf04f666e21781f1c981b866e52fbc58 2024-12-05T00:26:10,805 INFO [StoreOpener-cf04f666e21781f1c981b866e52fbc58-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region cf04f666e21781f1c981b866e52fbc58 2024-12-05T00:26:10,807 INFO [StoreOpener-cf04f666e21781f1c981b866e52fbc58-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cf04f666e21781f1c981b866e52fbc58 columnFamilyName info 2024-12-05T00:26:10,807 DEBUG [StoreOpener-cf04f666e21781f1c981b866e52fbc58-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:26:10,807 INFO [StoreOpener-cf04f666e21781f1c981b866e52fbc58-1 {}] regionserver.HStore(327): Store=cf04f666e21781f1c981b866e52fbc58/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:26:10,807 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for cf04f666e21781f1c981b866e52fbc58 2024-12-05T00:26:10,808 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/default/TestLogRolling-testLogRollOnPipelineRestart/cf04f666e21781f1c981b866e52fbc58 2024-12-05T00:26:10,808 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/default/TestLogRolling-testLogRollOnPipelineRestart/cf04f666e21781f1c981b866e52fbc58 2024-12-05T00:26:10,809 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for cf04f666e21781f1c981b866e52fbc58 2024-12-05T00:26:10,809 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for cf04f666e21781f1c981b866e52fbc58 2024-12-05T00:26:10,810 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for cf04f666e21781f1c981b866e52fbc58 2024-12-05T00:26:10,812 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/default/TestLogRolling-testLogRollOnPipelineRestart/cf04f666e21781f1c981b866e52fbc58/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T00:26:10,813 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened cf04f666e21781f1c981b866e52fbc58; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=787632, jitterRate=0.0015271008014678955}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T00:26:10,813 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for cf04f666e21781f1c981b866e52fbc58 2024-12-05T00:26:10,814 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for cf04f666e21781f1c981b866e52fbc58: Running coprocessor pre-open hook at 1733358370804Writing region info on filesystem at 1733358370804Initializing all the Stores at 1733358370805 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358370805Cleaning up temporary data from old regions at 1733358370809 (+4 ms)Running coprocessor post-open hooks at 1733358370813 (+4 ms)Region opened successfully at 1733358370814 (+1 ms) 2024-12-05T00:26:10,815 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1733358370461.cf04f666e21781f1c981b866e52fbc58., pid=6, masterSystemTime=1733358370799 2024-12-05T00:26:10,817 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1733358370461.cf04f666e21781f1c981b866e52fbc58. 2024-12-05T00:26:10,818 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1733358370461.cf04f666e21781f1c981b866e52fbc58. 2024-12-05T00:26:10,819 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=cf04f666e21781f1c981b866e52fbc58, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,34229,1733358369517 2024-12-05T00:26:10,821 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure cf04f666e21781f1c981b866e52fbc58, server=2113c16e5528,34229,1733358369517 because future has completed 2024-12-05T00:26:10,825 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-05T00:26:10,825 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure cf04f666e21781f1c981b866e52fbc58, server=2113c16e5528,34229,1733358369517 in 176 msec 2024-12-05T00:26:10,827 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-05T00:26:10,827 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=cf04f666e21781f1c981b866e52fbc58, ASSIGN in 336 msec 2024-12-05T00:26:10,828 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T00:26:10,828 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733358370828"}]},"ts":"1733358370828"} 2024-12-05T00:26:10,830 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-12-05T00:26:10,831 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T00:26:10,833 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 369 msec 2024-12-05T00:26:11,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:11,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:12,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:12,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:13,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:13,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:14,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:14,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:14,743 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-05T00:26:14,743 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-05T00:26:14,744 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-05T00:26:14,744 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-12-05T00:26:14,745 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T00:26:14,745 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-05T00:26:15,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:15,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:15,811 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-05T00:26:15,829 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:15,829 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:15,829 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:15,830 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:15,830 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:15,831 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:15,834 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:15,835 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:15,835 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:15,838 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:15,842 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-05T00:26:15,843 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-12-05T00:26:16,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:16,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:17,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:17,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:18,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:18,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:19,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:19,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:20,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:20,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:20,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46237 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T00:26:20,492 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-12-05T00:26:20,492 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-12-05T00:26:20,495 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-12-05T00:26:20,495 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1733358370461.cf04f666e21781f1c981b866e52fbc58. 2024-12-05T00:26:20,498 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1733358370461.cf04f666e21781f1c981b866e52fbc58., hostname=2113c16e5528,34229,1733358369517, seqNum=2] 2024-12-05T00:26:21,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:21,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:22,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:22,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:22,501 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358369911 2024-12-05T00:26:22,501 WARN [ResponseProcessor for block BP-309723592-172.17.0.2-1733358368776:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-309723592-172.17.0.2-1733358368776:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:22,501 WARN [ResponseProcessor for block BP-309723592-172.17.0.2-1733358368776:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-309723592-172.17.0.2-1733358368776:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-309723592-172.17.0.2-1733358368776:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:34997,DS-ec3f72de-df1b-4321-8c73-9d1f496e4794,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:22,501 WARN [ResponseProcessor for block BP-309723592-172.17.0.2-1733358368776:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-309723592-172.17.0.2-1733358368776:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-309723592-172.17.0.2-1733358368776:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:34997,DS-ec3f72de-df1b-4321-8c73-9d1f496e4794,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:22,502 WARN [DataStreamer for file /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/WALs/2113c16e5528,46237,1733358369470/2113c16e5528%2C46237%2C1733358369470.1733358369614 block BP-309723592-172.17.0.2-1733358368776:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-309723592-172.17.0.2-1733358368776:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34997,DS-ec3f72de-df1b-4321-8c73-9d1f496e4794,DISK], DatanodeInfoWithStorage[127.0.0.1:32793,DS-9be2a434-390b-4c40-8bf6-634cd8e7bb81,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34997,DS-ec3f72de-df1b-4321-8c73-9d1f496e4794,DISK]) is bad. 
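The repeated "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings above all come from the same place: the Close-WAL-Writer-0 thread is trying to recover the lease on an old WAL file and polls the filesystem roughly once per second, but the reflective isFileClosed call keeps failing because the FileSystem instance backing that WAL has apparently already been closed. Below is a minimal, self-contained sketch of that polling pattern, assuming hypothetical class and method names; it mirrors the shape visible in the stack traces but is not the real RecoverLeaseFSUtils source.

// Sketch of a lease-recovery poll loop: ask HDFS to recover the lease, then
// check isFileClosed (reflectively, as the stack traces above show) about
// once a second until a deadline. Class/method names here are hypothetical.
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public final class LeaseRecoverySketch {
  private static final Logger LOG = LoggerFactory.getLogger(LeaseRecoverySketch.class);

  /** Returns true once the file is closed, false if the deadline passes first. */
  public static boolean recoverLease(FileSystem fs, Path wal, long timeoutMs)
      throws InterruptedException {
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // nothing to recover for local or other filesystems
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        if (dfs.recoverLease(wal)) {
          return true; // lease recovered and file closed
        }
      } catch (IOException e) {
        LOG.warn("recoverLease failed for {}", wal, e);
      }
      try {
        // Reflective isFileClosed check; if the underlying DFSClient has been
        // closed, this fails with an InvocationTargetException wrapping
        // "java.io.IOException: Filesystem closed", as in the WARNs above.
        Method isFileClosed = dfs.getClass().getMethod("isFileClosed", Path.class);
        if ((Boolean) isFileClosed.invoke(dfs, wal)) {
          return true;
        }
      } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) {
        LOG.warn("Failed invocation for {}", wal, e);
      }
      Thread.sleep(1000); // matches the roughly one-second cadence of the retries above
    }
    return false;
  }
}

In a state like the one logged here, every iteration fails the same way, which is why the identical warning and stack trace recur once per second per WAL file until the close path gives up.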
2024-12-05T00:26:22,502 WARN [DataStreamer for file /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.meta.1733358370295.meta block BP-309723592-172.17.0.2-1733358368776:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-309723592-172.17.0.2-1733358368776:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32793,DS-9be2a434-390b-4c40-8bf6-634cd8e7bb81,DISK], DatanodeInfoWithStorage[127.0.0.1:34997,DS-ec3f72de-df1b-4321-8c73-9d1f496e4794,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34997,DS-ec3f72de-df1b-4321-8c73-9d1f496e4794,DISK]) is bad. 2024-12-05T00:26:22,502 WARN [DataStreamer for file /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358369911 block BP-309723592-172.17.0.2-1733358368776:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-309723592-172.17.0.2-1733358368776:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32793,DS-9be2a434-390b-4c40-8bf6-634cd8e7bb81,DISK], DatanodeInfoWithStorage[127.0.0.1:34997,DS-ec3f72de-df1b-4321-8c73-9d1f496e4794,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34997,DS-ec3f72de-df1b-4321-8c73-9d1f496e4794,DISK]) is bad. 2024-12-05T00:26:22,502 WARN [PacketResponder: BP-309723592-172.17.0.2-1733358368776:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:34997] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:26:22,502 WARN [PacketResponder: BP-309723592-172.17.0.2-1733358368776:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:34997] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] 
at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:26:22,502 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1408911775_22 at /127.0.0.1:33736 [Receiving block BP-309723592-172.17.0.2-1733358368776:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34997:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33736 dst: /127.0.0.1:34997 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:26:22,503 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1408911775_22 at /127.0.0.1:55752 [Receiving block BP-309723592-172.17.0.2-1733358368776:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:32793:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55752 dst: /127.0.0.1:32793 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:26:22,503 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-699230326_22 at /127.0.0.1:55782 [Receiving block BP-309723592-172.17.0.2-1733358368776:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:32793:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55782 dst: /127.0.0.1:32793 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:26:22,503 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-699230326_22 at /127.0.0.1:55794 [Receiving block BP-309723592-172.17.0.2-1733358368776:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:32793:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55794 dst: /127.0.0.1:32793 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:26:22,503 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-699230326_22 at /127.0.0.1:33780 [Receiving block BP-309723592-172.17.0.2-1733358368776:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34997:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33780 dst: /127.0.0.1:34997 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:26:22,503 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-699230326_22 at /127.0.0.1:33770 [Receiving block BP-309723592-172.17.0.2-1733358368776:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34997:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33770 dst: /127.0.0.1:34997 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
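The ERROR and WARN entries immediately above are two views of one event: a datanode in the write pipeline (127.0.0.1:34997) goes away, so the client-side ResponseProcessor sees EOF / "Bad response ERROR" and the DataStreamer marks that datanode as bad, while on the datanode side the PacketResponder and DataXceiver threads log "Connection reset by peer", ClosedChannelException, and "Premature EOF" for the half-open streams. As a purely illustrative follow-up (the helper class below is made up and uses only the public FileSystem API), this is one way to inspect which datanodes still hold a file's blocks after such a pipeline failure:

// Hypothetical helper: print the datanodes currently hosting each block of a
// file, e.g. to confirm a failed datanode is no longer in the replica set.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class BlockPlacementDump {
  public static void dump(Configuration conf, Path file) throws IOException {
    FileSystem fs = file.getFileSystem(conf);
    FileStatus status = fs.getFileStatus(file);
    BlockLocation[] blocks = fs.getFileBlockLocations(status, 0, status.getLen());
    for (BlockLocation block : blocks) {
      System.out.printf("offset=%d len=%d replicas=%s%n",
          block.getOffset(), block.getLength(), String.join(",", block.getNames()));
    }
  }
}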
2024-12-05T00:26:22,506 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2835f29c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:26:22,506 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@14a79ae9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:26:22,506 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:26:22,507 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@471bebfc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:26:22,507 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3fd7563{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/hadoop.log.dir/,STOPPED} 2024-12-05T00:26:22,508 WARN [BP-309723592-172.17.0.2-1733358368776 heartbeating to localhost/127.0.0.1:39785 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T00:26:22,508 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-05T00:26:22,508 WARN [BP-309723592-172.17.0.2-1733358368776 heartbeating to localhost/127.0.0.1:39785 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-309723592-172.17.0.2-1733358368776 (Datanode Uuid 27bbf400-a92b-4219-99da-be3681e7f296) service to localhost/127.0.0.1:39785 2024-12-05T00:26:22,508 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T00:26:22,508 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/cluster_fb2be59a-c7c1-a812-1afa-4bf3219bf7b4/data/data3/current/BP-309723592-172.17.0.2-1733358368776 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:26:22,509 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/cluster_fb2be59a-c7c1-a812-1afa-4bf3219bf7b4/data/data4/current/BP-309723592-172.17.0.2-1733358368776 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:26:22,509 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T00:26:22,521 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:26:22,524 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:26:22,525 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:26:22,525 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:26:22,525 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T00:26:22,526 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@392000f9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:26:22,526 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4136ef12{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:26:22,640 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@487eadc3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/java.io.tmpdir/jetty-localhost-42367-hadoop-hdfs-3_4_1-tests_jar-_-any-3332057455071101148/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:26:22,640 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6b2d1260{HTTP/1.1, (http/1.1)}{localhost:42367} 2024-12-05T00:26:22,640 INFO [Time-limited test {}] server.Server(415): Started @163268ms 2024-12-05T00:26:22,642 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T00:26:22,661 WARN [ResponseProcessor for block BP-309723592-172.17.0.2-1733358368776:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-309723592-172.17.0.2-1733358368776:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:22,661 WARN [ResponseProcessor for block BP-309723592-172.17.0.2-1733358368776:blk_1073741834_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-309723592-172.17.0.2-1733358368776:blk_1073741834_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:22,661 WARN [ResponseProcessor for block BP-309723592-172.17.0.2-1733358368776:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-309723592-172.17.0.2-1733358368776:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:22,661 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-699230326_22 at /127.0.0.1:53058 [Receiving block BP-309723592-172.17.0.2-1733358368776:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:32793:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53058 dst: /127.0.0.1:32793 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-05T00:26:22,661 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1408911775_22 at /127.0.0.1:53054 [Receiving block BP-309723592-172.17.0.2-1733358368776:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:32793:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53054 dst: /127.0.0.1:32793 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:26:22,662 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-699230326_22 at /127.0.0.1:53064 [Receiving block BP-309723592-172.17.0.2-1733358368776:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:32793:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53064 dst: /127.0.0.1:32793 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:26:22,668 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@109832d2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:26:22,668 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5fec76d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:26:22,668 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:26:22,668 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d6c2bfb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:26:22,669 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@165d0fad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/hadoop.log.dir/,STOPPED} 2024-12-05T00:26:22,670 WARN [BP-309723592-172.17.0.2-1733358368776 heartbeating to localhost/127.0.0.1:39785 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T00:26:22,670 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T00:26:22,670 WARN [BP-309723592-172.17.0.2-1733358368776 heartbeating to localhost/127.0.0.1:39785 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-309723592-172.17.0.2-1733358368776 (Datanode Uuid cf735d57-64b3-4247-acfb-7d0dd400132b) service to localhost/127.0.0.1:39785 2024-12-05T00:26:22,670 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T00:26:22,670 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/cluster_fb2be59a-c7c1-a812-1afa-4bf3219bf7b4/data/data1/current/BP-309723592-172.17.0.2-1733358368776 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:26:22,670 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/cluster_fb2be59a-c7c1-a812-1afa-4bf3219bf7b4/data/data2/current/BP-309723592-172.17.0.2-1733358368776 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:26:22,671 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T00:26:22,679 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:26:22,684 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:26:22,685 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:26:22,685 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:26:22,685 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T00:26:22,686 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@50ba6dae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:26:22,686 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11fd78ef{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:26:22,732 WARN [Thread-1333 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-05T00:26:22,735 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcd5d561fafacf037 with lease ID 0x2f1498a11ac457dd: from storage DS-ec3f72de-df1b-4321-8c73-9d1f496e4794 node DatanodeRegistration(127.0.0.1:40477, datanodeUuid=27bbf400-a92b-4219-99da-be3681e7f296, infoPort=42245, infoSecurePort=0, ipcPort=43655, storageInfo=lv=-57;cid=testClusterID;nsid=1631785924;c=1733358368776), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-05T00:26:22,735 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcd5d561fafacf037 with lease ID 0x2f1498a11ac457dd: from storage DS-48e0e437-d4db-48aa-aa1c-9df4eed3dcf6 node DatanodeRegistration(127.0.0.1:40477, datanodeUuid=27bbf400-a92b-4219-99da-be3681e7f296, infoPort=42245, infoSecurePort=0, ipcPort=43655, storageInfo=lv=-57;cid=testClusterID;nsid=1631785924;c=1733358368776), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:26:22,801 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3b408bc7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/java.io.tmpdir/jetty-localhost-37229-hadoop-hdfs-3_4_1-tests_jar-_-any-3644641281709537105/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:26:22,801 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@70d6804b{HTTP/1.1, (http/1.1)}{localhost:37229} 2024-12-05T00:26:22,802 INFO [Time-limited test {}] server.Server(415): Started @163429ms 2024-12-05T00:26:22,803 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T00:26:22,895 WARN [Thread-1364 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-05T00:26:22,897 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x33fb17480be5fe59 with lease ID 0x2f1498a11ac457de: from storage DS-9be2a434-390b-4c40-8bf6-634cd8e7bb81 node DatanodeRegistration(127.0.0.1:45479, datanodeUuid=cf735d57-64b3-4247-acfb-7d0dd400132b, infoPort=35287, infoSecurePort=0, ipcPort=45669, storageInfo=lv=-57;cid=testClusterID;nsid=1631785924;c=1733358368776), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:26:22,898 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x33fb17480be5fe59 with lease ID 0x2f1498a11ac457de: from storage DS-dff7ce02-e792-4af4-9cf4-28276243c9a4 node DatanodeRegistration(127.0.0.1:45479, datanodeUuid=cf735d57-64b3-4247-acfb-7d0dd400132b, infoPort=35287, infoSecurePort=0, ipcPort=45669, storageInfo=lv=-57;cid=testClusterID;nsid=1631785924;c=1733358368776), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-05T00:26:23,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-05T00:26:23,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:23,820 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-12-05T00:26:23,823 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-12-05T00:26:23,824 ERROR [FSHLog-0-hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83-prefix:2113c16e5528,34229,1733358369517 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32793,DS-9be2a434-390b-4c40-8bf6-634cd8e7bb81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-05T00:26:23,824 WARN [FSHLog-0-hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83-prefix:2113c16e5528,34229,1733358369517 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32793,DS-9be2a434-390b-4c40-8bf6-634cd8e7bb81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:23,824 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 2113c16e5528%2C34229%2C1733358369517:(num 1733358369911) roll requested 2024-12-05T00:26:23,825 INFO [regionserver/2113c16e5528:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C34229%2C1733358369517.1733358383825 2024-12-05T00:26:23,830 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358369911 newFile=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358383825 2024-12-05T00:26:23,830 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:23,830 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:23,831 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:23,831 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:23,831 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:23,831 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358369911 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358383825 2024-12-05T00:26:23,831 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32793,DS-9be2a434-390b-4c40-8bf6-634cd8e7bb81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-05T00:26:23,831 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32793,DS-9be2a434-390b-4c40-8bf6-634cd8e7bb81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:23,832 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358369911 2024-12-05T00:26:23,832 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35287:35287),(127.0.0.1/127.0.0.1:42245:42245)] 2024-12-05T00:26:23,832 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358369911 is not closed yet, will try archiving it next time 2024-12-05T00:26:23,832 WARN [IPC Server handler 1 on default port 39785 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358369911 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-12-05T00:26:23,832 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358369911 after 0ms 2024-12-05T00:26:23,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45479 is added to blk_1073741833_1017 (size=1632) 2024-12-05T00:26:24,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:24,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:24,734 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-05T00:26:25,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:25,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-05T00:26:25,835 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-12-05T00:26:26,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:26,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:27,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:27,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:27,833 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358369911 after 4001ms 2024-12-05T00:26:27,838 WARN [ResponseProcessor for block BP-309723592-172.17.0.2-1733358368776:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-309723592-172.17.0.2-1733358368776:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:27,838 WARN [DataStreamer for file /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358383825 block BP-309723592-172.17.0.2-1733358368776:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-309723592-172.17.0.2-1733358368776:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45479,DS-9be2a434-390b-4c40-8bf6-634cd8e7bb81,DISK], DatanodeInfoWithStorage[127.0.0.1:40477,DS-ec3f72de-df1b-4321-8c73-9d1f496e4794,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45479,DS-9be2a434-390b-4c40-8bf6-634cd8e7bb81,DISK]) is bad. 2024-12-05T00:26:27,839 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-699230326_22 at /127.0.0.1:47666 [Receiving block BP-309723592-172.17.0.2-1733358368776:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:45479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47666 dst: /127.0.0.1:45479 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-05T00:26:27,839 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-699230326_22 at /127.0.0.1:60040 [Receiving block BP-309723592-172.17.0.2-1733358368776:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:40477:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60040 dst: /127.0.0.1:40477 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:26:27,840 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3b408bc7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:26:27,840 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@70d6804b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:26:27,840 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:26:27,841 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11fd78ef{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:26:27,841 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@50ba6dae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/hadoop.log.dir/,STOPPED} 2024-12-05T00:26:27,842 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T00:26:27,842 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T00:26:27,843 WARN [BP-309723592-172.17.0.2-1733358368776 heartbeating to localhost/127.0.0.1:39785 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T00:26:27,843 WARN [BP-309723592-172.17.0.2-1733358368776 heartbeating to localhost/127.0.0.1:39785 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-309723592-172.17.0.2-1733358368776 (Datanode Uuid cf735d57-64b3-4247-acfb-7d0dd400132b) service to localhost/127.0.0.1:39785 2024-12-05T00:26:27,843 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/cluster_fb2be59a-c7c1-a812-1afa-4bf3219bf7b4/data/data1/current/BP-309723592-172.17.0.2-1733358368776 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:26:27,844 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/cluster_fb2be59a-c7c1-a812-1afa-4bf3219bf7b4/data/data2/current/BP-309723592-172.17.0.2-1733358368776 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:26:27,844 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T00:26:27,882 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:26:27,885 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:26:27,886 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:26:27,886 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:26:27,886 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-05T00:26:27,887 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@71e80301{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:26:27,887 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3f8a0d0d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:26:28,001 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2b944a8d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/java.io.tmpdir/jetty-localhost-33225-hadoop-hdfs-3_4_1-tests_jar-_-any-9158717962070770344/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:26:28,002 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3d61cf28{HTTP/1.1, (http/1.1)}{localhost:33225} 2024-12-05T00:26:28,002 INFO [Time-limited test {}] server.Server(415): Started @168629ms 2024-12-05T00:26:28,003 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T00:26:28,025 WARN [ResponseProcessor for block BP-309723592-172.17.0.2-1733358368776:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-309723592-172.17.0.2-1733358368776:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:28,025 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-699230326_22 at /127.0.0.1:60070 [Receiving block BP-309723592-172.17.0.2-1733358368776:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:40477:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60070 dst: /127.0.0.1:40477 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:26:28,026 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@487eadc3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:26:28,027 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6b2d1260{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:26:28,027 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:26:28,027 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4136ef12{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:26:28,027 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@392000f9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/hadoop.log.dir/,STOPPED} 2024-12-05T00:26:28,028 WARN [BP-309723592-172.17.0.2-1733358368776 heartbeating to localhost/127.0.0.1:39785 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T00:26:28,028 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T00:26:28,028 WARN [BP-309723592-172.17.0.2-1733358368776 heartbeating to localhost/127.0.0.1:39785 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-309723592-172.17.0.2-1733358368776 (Datanode Uuid 27bbf400-a92b-4219-99da-be3681e7f296) service to localhost/127.0.0.1:39785 2024-12-05T00:26:28,029 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T00:26:28,031 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/cluster_fb2be59a-c7c1-a812-1afa-4bf3219bf7b4/data/data3/current/BP-309723592-172.17.0.2-1733358368776 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:26:28,031 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/cluster_fb2be59a-c7c1-a812-1afa-4bf3219bf7b4/data/data4/current/BP-309723592-172.17.0.2-1733358368776 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:26:28,031 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T00:26:28,041 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:26:28,044 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:26:28,044 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:26:28,045 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:26:28,045 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T00:26:28,045 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ae00177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:26:28,045 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49f94f8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:26:28,095 WARN [Thread-1407 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-05T00:26:28,097 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa05d4158b3f58f4d with lease ID 0x2f1498a11ac457df: from storage DS-9be2a434-390b-4c40-8bf6-634cd8e7bb81 node DatanodeRegistration(127.0.0.1:41723, datanodeUuid=cf735d57-64b3-4247-acfb-7d0dd400132b, infoPort=43539, infoSecurePort=0, ipcPort=43561, storageInfo=lv=-57;cid=testClusterID;nsid=1631785924;c=1733358368776), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:26:28,097 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa05d4158b3f58f4d with lease ID 0x2f1498a11ac457df: from storage DS-dff7ce02-e792-4af4-9cf4-28276243c9a4 node DatanodeRegistration(127.0.0.1:41723, datanodeUuid=cf735d57-64b3-4247-acfb-7d0dd400132b, infoPort=43539, infoSecurePort=0, ipcPort=43561, storageInfo=lv=-57;cid=testClusterID;nsid=1631785924;c=1733358368776), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:26:28,164 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5aaed393{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/java.io.tmpdir/jetty-localhost-36499-hadoop-hdfs-3_4_1-tests_jar-_-any-234372382682785547/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:26:28,164 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7939cb3e{HTTP/1.1, (http/1.1)}{localhost:36499} 2024-12-05T00:26:28,164 INFO [Time-limited test {}] server.Server(415): Started @168792ms 2024-12-05T00:26:28,166 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T00:26:28,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:28,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:28,252 WARN [Thread-1438 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-05T00:26:28,255 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf0dd8e111209d91f with lease ID 0x2f1498a11ac457e0: from storage DS-ec3f72de-df1b-4321-8c73-9d1f496e4794 node DatanodeRegistration(127.0.0.1:37127, datanodeUuid=27bbf400-a92b-4219-99da-be3681e7f296, infoPort=35015, infoSecurePort=0, ipcPort=46309, storageInfo=lv=-57;cid=testClusterID;nsid=1631785924;c=1733358368776), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:26:28,255 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf0dd8e111209d91f with lease ID 0x2f1498a11ac457e0: from storage DS-48e0e437-d4db-48aa-aa1c-9df4eed3dcf6 node DatanodeRegistration(127.0.0.1:37127, datanodeUuid=27bbf400-a92b-4219-99da-be3681e7f296, infoPort=35015, infoSecurePort=0, ipcPort=46309, storageInfo=lv=-57;cid=testClusterID;nsid=1631785924;c=1733358368776), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:26:29,185 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-12-05T00:26:29,187 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-12-05T00:26:29,188 ERROR [FSHLog-0-hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83-prefix:2113c16e5528,34229,1733358369517 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40477,DS-ec3f72de-df1b-4321-8c73-9d1f496e4794,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:29,188 WARN [FSHLog-0-hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83-prefix:2113c16e5528,34229,1733358369517 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40477,DS-ec3f72de-df1b-4321-8c73-9d1f496e4794,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-05T00:26:29,188 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 2113c16e5528%2C34229%2C1733358369517:(num 1733358383825) roll requested 2024-12-05T00:26:29,189 INFO [regionserver/2113c16e5528:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C34229%2C1733358369517.1733358389189 2024-12-05T00:26:29,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-05T00:26:29,194 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358383825 newFile=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358389189 2024-12-05T00:26:29,194 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:29,194 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:29,194 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:29,194 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:29,194 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:29,195 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358383825 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358389189 2024-12-05T00:26:29,195 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40477,DS-ec3f72de-df1b-4321-8c73-9d1f496e4794,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:29,195 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40477,DS-ec3f72de-df1b-4321-8c73-9d1f496e4794,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-05T00:26:29,195 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358383825 2024-12-05T00:26:29,195 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35015:35015),(127.0.0.1/127.0.0.1:43539:43539)] 2024-12-05T00:26:29,195 WARN [IPC Server handler 4 on default port 39785 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358383825 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-12-05T00:26:29,195 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358383825 is not closed yet, will try archiving it next time 2024-12-05T00:26:29,196 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358383825 after 1ms 2024-12-05T00:26:29,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:30,097 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-05T00:26:30,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:30,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:31,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:31,197 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C34229%2C1733358369517.1733358391196 2024-12-05T00:26:31,202 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358389189 newFile=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358391196 2024-12-05T00:26:31,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:31,202 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:31,202 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:31,202 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:31,202 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:31,203 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:31,203 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358389189 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358391196 2024-12-05T00:26:31,204 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43539:43539),(127.0.0.1/127.0.0.1:35015:35015)] 2024-12-05T00:26:31,204 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358383825 is not closed yet, will try archiving it next time 2024-12-05T00:26:31,204 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358389189 is not closed yet, will try archiving it next time 2024-12-05T00:26:31,204 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358369911 2024-12-05T00:26:31,204 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358369911 2024-12-05T00:26:31,204 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on 
file=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358369911 after 0ms 2024-12-05T00:26:31,205 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358369911 2024-12-05T00:26:31,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41723 is added to blk_1073741838_1019 (size=1264) 2024-12-05T00:26:31,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741838_1019 (size=1264) 2024-12-05T00:26:31,215 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733358370814/Put/vlen=218/seqid=0] 2024-12-05T00:26:31,215 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1733358380499/Put/vlen=1045/seqid=0] 2024-12-05T00:26:31,215 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358369911 2024-12-05T00:26:31,215 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358383825 2024-12-05T00:26:31,215 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358383825 2024-12-05T00:26:31,216 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358383825 after 1ms 2024-12-05T00:26:31,216 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358383825 2024-12-05T00:26:31,219 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1733358383824/Put/vlen=1045/seqid=0] 2024-12-05T00:26:31,219 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1733358385836/Put/vlen=1045/seqid=0] 2024-12-05T00:26:31,219 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358383825 2024-12-05T00:26:31,219 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358389189 2024-12-05T00:26:31,219 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358389189 2024-12-05T00:26:31,220 WARN [IPC Server handler 3 on default port 
39785 {}] namenode.FSNamesystem(3730): BLOCK* internalReleaseLease: All existing blocks are COMPLETE, lease removed, file /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358389189 closed. 2024-12-05T00:26:31,220 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358389189 after 1ms 2024-12-05T00:26:31,220 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358389189 2024-12-05T00:26:31,223 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1733358389188/Put/vlen=1045/seqid=0] 2024-12-05T00:26:31,223 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358391196 2024-12-05T00:26:31,223 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358391196 2024-12-05T00:26:31,223 WARN [IPC Server handler 2 on default port 39785 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358391196 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-12-05T00:26:31,223 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358391196 after 0ms 2024-12-05T00:26:31,606 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358383825 is not closed yet, will try archiving it next time 2024-12-05T00:26:32,098 WARN [ResponseProcessor for block BP-309723592-172.17.0.2-1733358368776:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-309723592-172.17.0.2-1733358368776:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-05T00:26:32,098 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1408911775_22 at /127.0.0.1:52110 [Receiving block BP-309723592-172.17.0.2-1733358368776:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:41723:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52110 dst: /127.0.0.1:41723 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:41723 remote=/127.0.0.1:52110]. Total timeout mills is 60000, 59104 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:26:32,098 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1408911775_22 at /127.0.0.1:59244 [Receiving block BP-309723592-172.17.0.2-1733358368776:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:37127:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59244 dst: /127.0.0.1:37127 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:26:32,098 WARN [DataStreamer for file /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358391196 block BP-309723592-172.17.0.2-1733358368776:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-309723592-172.17.0.2-1733358368776:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41723,DS-9be2a434-390b-4c40-8bf6-634cd8e7bb81,DISK], DatanodeInfoWithStorage[127.0.0.1:37127,DS-ec3f72de-df1b-4321-8c73-9d1f496e4794,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41723,DS-9be2a434-390b-4c40-8bf6-634cd8e7bb81,DISK]) is bad. 2024-12-05T00:26:32,099 WARN [DataStreamer for file /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358391196 block BP-309723592-172.17.0.2-1733358368776:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-309723592-172.17.0.2-1733358368776:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:32,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41723 is added to blk_1073741839_1022 (size=85) 2024-12-05T00:26:32,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741839_1022 (size=85) 2024-12-05T00:26:32,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:32,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:33,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:33,197 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358383825 after 4002ms 2024-12-05T00:26:33,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:34,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:34,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:35,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:35,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-05T00:26:35,224 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358391196 after 4001ms 2024-12-05T00:26:35,224 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358391196 2024-12-05T00:26:35,228 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358391196 2024-12-05T00:26:35,228 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-12-05T00:26:35,229 ERROR [FSHLog-0-hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83-prefix:2113c16e5528,34229,1733358369517.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32793,DS-9be2a434-390b-4c40-8bf6-634cd8e7bb81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:35,229 WARN [FSHLog-0-hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83-prefix:2113c16e5528,34229,1733358369517.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32793,DS-9be2a434-390b-4c40-8bf6-634cd8e7bb81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
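The two "All datanodes [...] are bad. Aborting..." stack traces above are the HDFS client giving up on the meta WAL's write pipeline: every datanode still in the pipeline has been marked bad and DataStreamer has no replacement to fall back to, so both the append and the sync fail and, as the next entries show, a WAL roll is requested. How hard the client tries to swap in a replacement datanode is governed by the dfs.client.block.write.replace-datanode-on-failure.* client settings. The snippet below is only a minimal illustrative sketch of those knobs against a generic HDFS write; it is not part of this test, and the output path in it is made up (the NameNode address is the one that appears in the log).

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReplaceDatanodePolicySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Client-side pipeline-recovery knobs. On very small clusters (the mini-cluster in this
    // test only has two datanodes) there is often no spare node to swap in, so NEVER plus
    // best-effort tells the client to keep writing to whatever pipeline members remain
    // instead of failing the stream when a replacement cannot be added.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);

    // NameNode address taken from the log above; the path below is hypothetical.
    try (FileSystem fs = FileSystem.get(new URI("hdfs://localhost:39785"), conf);
         FSDataOutputStream out = fs.create(new Path("/tmp/pipeline-policy-demo"))) {
      out.writeBytes("hello");
      out.hflush(); // forces the block write pipeline to be established
    }
  }
}
```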
2024-12-05T00:26:35,229 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 2113c16e5528%2C34229%2C1733358369517.meta:.meta(num 1733358370295) roll requested 2024-12-05T00:26:35,229 INFO [regionserver/2113c16e5528:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C34229%2C1733358369517.meta.1733358395229.meta 2024-12-05T00:26:35,235 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:35,235 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:35,235 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:35,235 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:35,236 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:35,236 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.meta.1733358370295.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.meta.1733358395229.meta 2024-12-05T00:26:35,236 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32793,DS-9be2a434-390b-4c40-8bf6-634cd8e7bb81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:35,236 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32793,DS-9be2a434-390b-4c40-8bf6-634cd8e7bb81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
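The entries that follow are another round of WAL lease recovery: RecoverLeaseFSUtils asks the NameNode to recover the lease on the old writer's file and then polls isFileClosed roughly once a second until the NameNode reports the file closed. The repeated "Failed invocation ... Filesystem closed" warnings earlier are that probe failing against a DFSClient that has already been shut down, and the "Recovered lease, attempt=1 ... after ~4000ms" lines are the same loop succeeding. Below is a rough sketch of that polling pattern against the public DistributedFileSystem API, using a hypothetical WAL path; it is an approximation for illustration, not the actual reflection-based RecoverLeaseFSUtils code.

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {

  /** Start lease recovery, then poll until the NameNode reports the file closed. */
  static boolean waitForLeaseRecovery(DistributedFileSystem dfs, Path file, long timeoutMs)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    boolean closed = dfs.recoverLease(file); // true means the file is already closed
    while (!closed && System.currentTimeMillis() < deadline) {
      Thread.sleep(1000);                 // the log shows roughly one probe per second
      closed = dfs.isFileClosed(file);    // the "Failed invocation" warnings are this probe failing
    }
    return closed;
  }

  public static void main(String[] args) throws Exception {
    // NameNode address taken from the log; the WAL path here is hypothetical.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
        .get(new URI("hdfs://localhost:39785"), new Configuration());
    Path oldWal = new Path("/hbase/WALs/example-regionserver/example.wal");
    System.out.println("lease recovered, file closed: " + waitForLeaseRecovery(dfs, oldWal, 60_000));
  }
}
```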
2024-12-05T00:26:35,236 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.meta.1733358370295.meta 2024-12-05T00:26:35,237 WARN [IPC Server handler 2 on default port 39785 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.meta.1733358370295.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1015 2024-12-05T00:26:35,237 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.meta.1733358370295.meta after 1ms 2024-12-05T00:26:35,238 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35015:35015),(127.0.0.1/127.0.0.1:43539:43539)] 2024-12-05T00:26:35,238 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.meta.1733358370295.meta is not closed yet, will try archiving it next time 2024-12-05T00:26:35,254 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/hbase/meta/1588230740/.tmp/info/b379d2aed20d484c865663623c13fcfa is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1733358370461.cf04f666e21781f1c981b866e52fbc58./info:regioninfo/1733358370818/Put/seqid=0 2024-12-05T00:26:35,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41723 is added to blk_1073741841_1025 (size=7125) 2024-12-05T00:26:35,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741841_1025 (size=7125) 2024-12-05T00:26:35,259 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/hbase/meta/1588230740/.tmp/info/b379d2aed20d484c865663623c13fcfa 2024-12-05T00:26:35,279 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/hbase/meta/1588230740/.tmp/ns/9b8aeb21b4fe4863ab4229f8b2472f1d is 43, key is default/ns:d/1733358370342/Put/seqid=0 2024-12-05T00:26:35,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41723 is added to blk_1073741842_1026 (size=5153) 2024-12-05T00:26:35,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741842_1026 (size=5153) 2024-12-05T00:26:35,284 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/hbase/meta/1588230740/.tmp/ns/9b8aeb21b4fe4863ab4229f8b2472f1d 2024-12-05T00:26:35,309 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/hbase/meta/1588230740/.tmp/table/d8fcafcef7bb47629d6908ae8095b7ac is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1733358370828/Put/seqid=0 2024-12-05T00:26:35,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41723 is added to blk_1073741843_1027 (size=5438) 2024-12-05T00:26:35,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741843_1027 (size=5438) 2024-12-05T00:26:35,315 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/hbase/meta/1588230740/.tmp/table/d8fcafcef7bb47629d6908ae8095b7ac 2024-12-05T00:26:35,320 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/hbase/meta/1588230740/.tmp/info/b379d2aed20d484c865663623c13fcfa as hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/hbase/meta/1588230740/info/b379d2aed20d484c865663623c13fcfa 2024-12-05T00:26:35,325 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/hbase/meta/1588230740/info/b379d2aed20d484c865663623c13fcfa, entries=10, sequenceid=11, filesize=7.0 K 2024-12-05T00:26:35,326 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/hbase/meta/1588230740/.tmp/ns/9b8aeb21b4fe4863ab4229f8b2472f1d as hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/hbase/meta/1588230740/ns/9b8aeb21b4fe4863ab4229f8b2472f1d 2024-12-05T00:26:35,331 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/hbase/meta/1588230740/ns/9b8aeb21b4fe4863ab4229f8b2472f1d, entries=2, sequenceid=11, filesize=5.0 K 2024-12-05T00:26:35,332 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/hbase/meta/1588230740/.tmp/table/d8fcafcef7bb47629d6908ae8095b7ac as hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/hbase/meta/1588230740/table/d8fcafcef7bb47629d6908ae8095b7ac 2024-12-05T00:26:35,337 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/hbase/meta/1588230740/table/d8fcafcef7bb47629d6908ae8095b7ac, entries=2, sequenceid=11, filesize=5.3 K 2024-12-05T00:26:35,338 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 110ms, sequenceid=11, compaction requested=false 2024-12-05T00:26:35,338 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-05T00:26:35,338 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing cf04f666e21781f1c981b866e52fbc58 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-05T00:26:35,339 ERROR [FSHLog-0-hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83-prefix:2113c16e5528,34229,1733358369517 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-309723592-172.17.0.2-1733358368776:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:35,339 WARN [FSHLog-0-hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83-prefix:2113c16e5528,34229,1733358369517 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-309723592-172.17.0.2-1733358368776:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
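The RemoteException above ("Unexpected BlockUCState ... UNDER_RECOVERY but not UNDER_CONSTRUCTION") indicates the NameNode refused to bump the block's generation stamp because the old WAL block had already been placed under lease recovery, so the client's own pipeline recovery for that block cannot proceed; the next entries show the log roller responding by rolling the data WAL. Here the roll is driven internally by the region server after the append failure. For reference only, the same operation can be requested from outside through the public Admin API; the sketch below is a hedged illustration of that call (the server name is copied from the log), not something this test performs.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollWalSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Server name format is host,port,startcode; this one is copied from the log above.
    ServerName rs = ServerName.valueOf("2113c16e5528,34229,1733358369517");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.rollWALWriter(rs); // asks the region server to roll its WAL writer(s)
    }
  }
}
```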
2024-12-05T00:26:35,339 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 2113c16e5528%2C34229%2C1733358369517:(num 1733358391196) roll requested 2024-12-05T00:26:35,339 INFO [regionserver/2113c16e5528:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C34229%2C1733358369517.1733358395339 2024-12-05T00:26:35,344 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358391196 newFile=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358395339 2024-12-05T00:26:35,344 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:35,344 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:35,344 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:35,344 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:35,344 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:35,345 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358391196 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358395339 2024-12-05T00:26:35,345 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-309723592-172.17.0.2-1733358368776:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:35,345 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43539:43539),(127.0.0.1/127.0.0.1:35015:35015)] 2024-12-05T00:26:35,345 DEBUG [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358391196 is not closed yet, will try archiving it next time 2024-12-05T00:26:35,345 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-309723592-172.17.0.2-1733358368776:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-05T00:26:35,345 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358391196
2024-12-05T00:26:35,346 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358391196 after 1ms
2024-12-05T00:26:35,346 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.1733358391196 to hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/oldWALs/2113c16e5528%2C34229%2C1733358369517.1733358391196
2024-12-05T00:26:35,361 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/default/TestLogRolling-testLogRollOnPipelineRestart/cf04f666e21781f1c981b866e52fbc58/.tmp/info/269606db207c4161bca2794ce161bf45 is 1080, key is row1002/info:/1733358380499/Put/seqid=0
2024-12-05T00:26:35,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741845_1029 (size=9270)
2024-12-05T00:26:35,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41723 is added to blk_1073741845_1029 (size=9270)
2024-12-05T00:26:35,366 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/default/TestLogRolling-testLogRollOnPipelineRestart/cf04f666e21781f1c981b866e52fbc58/.tmp/info/269606db207c4161bca2794ce161bf45
2024-12-05T00:26:35,372 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/default/TestLogRolling-testLogRollOnPipelineRestart/cf04f666e21781f1c981b866e52fbc58/.tmp/info/269606db207c4161bca2794ce161bf45 as hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/default/TestLogRolling-testLogRollOnPipelineRestart/cf04f666e21781f1c981b866e52fbc58/info/269606db207c4161bca2794ce161bf45
2024-12-05T00:26:35,377 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/default/TestLogRolling-testLogRollOnPipelineRestart/cf04f666e21781f1c981b866e52fbc58/info/269606db207c4161bca2794ce161bf45, entries=4, sequenceid=8, filesize=9.1 K
2024-12-05T00:26:35,378 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for cf04f666e21781f1c981b866e52fbc58 in 40ms, sequenceid=8, compaction requested=false
2024-12-05T00:26:35,378 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for cf04f666e21781f1c981b866e52fbc58:
2024-12-05T00:26:35,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-12-05T00:26:35,384 INFO [Time-limited test {}]
client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-05T00:26:35,384 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:26:35,384 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:26:35,384 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:26:35,384 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-05T00:26:35,384 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-05T00:26:35,384 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=653564868, stopped=false 2024-12-05T00:26:35,384 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=2113c16e5528,46237,1733358369470 2024-12-05T00:26:35,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T00:26:35,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34229-0x1018003a53f0001, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T00:26:35,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:35,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34229-0x1018003a53f0001, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:35,386 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T00:26:35,387 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-05T00:26:35,387 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:26:35,387 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34229-0x1018003a53f0001, quorum=127.0.0.1:51550, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:26:35,387 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:26:35,387 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:26:35,387 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2113c16e5528,34229,1733358369517' ***** 2024-12-05T00:26:35,387 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T00:26:35,387 INFO [RS:0;2113c16e5528:34229 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T00:26:35,387 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T00:26:35,388 INFO [RS:0;2113c16e5528:34229 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T00:26:35,388 INFO [RS:0;2113c16e5528:34229 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T00:26:35,388 INFO [RS:0;2113c16e5528:34229 {}] regionserver.HRegionServer(3091): Received CLOSE for cf04f666e21781f1c981b866e52fbc58 2024-12-05T00:26:35,388 INFO [RS:0;2113c16e5528:34229 {}] regionserver.HRegionServer(959): stopping server 2113c16e5528,34229,1733358369517 2024-12-05T00:26:35,388 INFO [RS:0;2113c16e5528:34229 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T00:26:35,388 INFO [RS:0;2113c16e5528:34229 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;2113c16e5528:34229. 
2024-12-05T00:26:35,388 DEBUG [RS:0;2113c16e5528:34229 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:26:35,388 DEBUG [RS:0;2113c16e5528:34229 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:26:35,388 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing cf04f666e21781f1c981b866e52fbc58, disabling compactions & flushes 2024-12-05T00:26:35,388 INFO [RS:0;2113c16e5528:34229 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T00:26:35,388 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733358370461.cf04f666e21781f1c981b866e52fbc58. 2024-12-05T00:26:35,388 INFO [RS:0;2113c16e5528:34229 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T00:26:35,388 INFO [RS:0;2113c16e5528:34229 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-05T00:26:35,388 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733358370461.cf04f666e21781f1c981b866e52fbc58. 2024-12-05T00:26:35,388 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733358370461.cf04f666e21781f1c981b866e52fbc58. after waiting 0 ms 2024-12-05T00:26:35,388 INFO [RS:0;2113c16e5528:34229 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-05T00:26:35,388 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733358370461.cf04f666e21781f1c981b866e52fbc58. 
2024-12-05T00:26:35,388 INFO [RS:0;2113c16e5528:34229 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close
2024-12-05T00:26:35,388 DEBUG [RS:0;2113c16e5528:34229 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, cf04f666e21781f1c981b866e52fbc58=TestLogRolling-testLogRollOnPipelineRestart,,1733358370461.cf04f666e21781f1c981b866e52fbc58.}
2024-12-05T00:26:35,389 DEBUG [RS:0;2113c16e5528:34229 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, cf04f666e21781f1c981b866e52fbc58
2024-12-05T00:26:35,389 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-12-05T00:26:35,389 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-12-05T00:26:35,389 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-12-05T00:26:35,389 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-05T00:26:35,389 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-12-05T00:26:35,393 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1
2024-12-05T00:26:35,394 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-05T00:26:35,394 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-12-05T00:26:35,394 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733358395389Running coprocessor pre-close hooks at 1733358395389Disabling compacts and flushes for region at 1733358395389Disabling writes for close at 1733358395389Writing region close event to WAL at 1733358395390 (+1 ms)Running coprocessor post-close hooks at 1733358395394 (+4 ms)Closed at 1733358395394
2024-12-05T00:26:35,394 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-05T00:26:35,394 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/data/default/TestLogRolling-testLogRollOnPipelineRestart/cf04f666e21781f1c981b866e52fbc58/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1
2024-12-05T00:26:35,395 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733358370461.cf04f666e21781f1c981b866e52fbc58.
2024-12-05T00:26:35,395 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for cf04f666e21781f1c981b866e52fbc58: Waiting for close lock at 1733358395388Running coprocessor pre-close hooks at 1733358395388Disabling compacts and flushes for region at 1733358395388Disabling writes for close at 1733358395388Writing region close event to WAL at 1733358395389 (+1 ms)Running coprocessor post-close hooks at 1733358395395 (+6 ms)Closed at 1733358395395 2024-12-05T00:26:35,395 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733358370461.cf04f666e21781f1c981b866e52fbc58. 2024-12-05T00:26:35,589 INFO [RS:0;2113c16e5528:34229 {}] regionserver.HRegionServer(976): stopping server 2113c16e5528,34229,1733358369517; all regions closed. 2024-12-05T00:26:35,589 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:35,589 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:35,590 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:35,590 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:35,590 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:35,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41723 is added to blk_1073741840_1023 (size=825) 2024-12-05T00:26:35,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741840_1023 (size=825) 2024-12-05T00:26:35,776 INFO [regionserver/2113c16e5528:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-05T00:26:35,776 INFO [regionserver/2113c16e5528:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-05T00:26:35,777 INFO [regionserver/2113c16e5528:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T00:26:36,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:36,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:37,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:37,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:38,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:38,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:39,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:39,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-05T00:26:39,238 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.meta.1733358370295.meta after 4002ms
2024-12-05T00:26:39,238 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/WALs/2113c16e5528,34229,1733358369517/2113c16e5528%2C34229%2C1733358369517.meta.1733358370295.meta to hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/oldWALs/2113c16e5528%2C34229%2C1733358369517.meta.1733358370295.meta
2024-12-05T00:26:39,241 DEBUG [RS:0;2113c16e5528:34229 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/oldWALs
2024-12-05T00:26:39,241 INFO [RS:0;2113c16e5528:34229 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2113c16e5528%2C34229%2C1733358369517.meta:.meta(num 1733358395229)
2024-12-05T00:26:39,241 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T00:26:39,241 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T00:26:39,241 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T00:26:39,241 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T00:26:39,242 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T00:26:39,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741844_1028 (size=1162)
2024-12-05T00:26:39,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41723 is added to blk_1073741844_1028 (size=1162)
2024-12-05T00:26:39,248 DEBUG [RS:0;2113c16e5528:34229 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/oldWALs
2024-12-05T00:26:39,248 INFO [RS:0;2113c16e5528:34229 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2113c16e5528%2C34229%2C1733358369517:(num 1733358395339)
2024-12-05T00:26:39,248 DEBUG [RS:0;2113c16e5528:34229 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-05T00:26:39,248 INFO [RS:0;2113c16e5528:34229 {}] regionserver.LeaseManager(133): Closed leases
2024-12-05T00:26:39,248 INFO [RS:0;2113c16e5528:34229 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-05T00:26:39,248 INFO [RS:0;2113c16e5528:34229 {}] hbase.ChoreService(370): Chore service for: regionserver/2113c16e5528:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-12-05T00:26:39,249 INFO [RS:0;2113c16e5528:34229 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-05T00:26:39,249 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-05T00:26:39,249 INFO [RS:0;2113c16e5528:34229 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34229
2024-12-05T00:26:39,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34229-0x1018003a53f0001, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2113c16e5528,34229,1733358369517
2024-12-05T00:26:39,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-05T00:26:39,251 INFO [RS:0;2113c16e5528:34229 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-05T00:26:39,252 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2113c16e5528,34229,1733358369517]
2024-12-05T00:26:39,254 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command
java.io.IOException: Failed to delete 1 (out of 1) replica(s):
0) Failed to delete replica blk_1073741834_1015: GenerationStamp not matched, existing replica is blk_1073741834_1010
at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?]
at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?]
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?]
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?]
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?]
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?]
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?]
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?]
2024-12-05T00:26:39,255 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2113c16e5528,34229,1733358369517 already deleted, retry=false
2024-12-05T00:26:39,255 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2113c16e5528,34229,1733358369517 expired; onlineServers=0
2024-12-05T00:26:39,255 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '2113c16e5528,46237,1733358369470' *****
2024-12-05T00:26:39,255 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-05T00:26:39,255 INFO [M:0;2113c16e5528:46237 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-05T00:26:39,255 INFO [M:0;2113c16e5528:46237 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-05T00:26:39,255 DEBUG [M:0;2113c16e5528:46237 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-05T00:26:39,255 DEBUG [M:0;2113c16e5528:46237 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-05T00:26:39,255 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-05T00:26:39,255 DEBUG [master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.large.0-1733358369698 {}] cleaner.HFileCleaner(306): Exit Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.large.0-1733358369698,5,FailOnTimeoutGroup]
2024-12-05T00:26:39,255 DEBUG [master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.small.0-1733358369699 {}] cleaner.HFileCleaner(306): Exit Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.small.0-1733358369699,5,FailOnTimeoutGroup]
2024-12-05T00:26:39,255 INFO [M:0;2113c16e5528:46237 {}] hbase.ChoreService(370): Chore service for: master/2113c16e5528:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-12-05T00:26:39,255 INFO [M:0;2113c16e5528:46237 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-05T00:26:39,255 DEBUG [M:0;2113c16e5528:46237 {}] master.HMaster(1795): Stopping service threads
2024-12-05T00:26:39,256 INFO [M:0;2113c16e5528:46237 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-05T00:26:39,256 INFO [M:0;2113c16e5528:46237 {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-05T00:26:39,256 INFO [M:0;2113c16e5528:46237 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-05T00:26:39,256 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-05T00:26:39,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-05T00:26:39,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:39,257 DEBUG [M:0;2113c16e5528:46237 {}] zookeeper.ZKUtil(347): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-05T00:26:39,257 WARN [M:0;2113c16e5528:46237 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-05T00:26:39,257 INFO [M:0;2113c16e5528:46237 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/.lastflushedseqids 2024-12-05T00:26:39,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741846_1030 (size=139) 2024-12-05T00:26:39,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41723 is added to blk_1073741846_1030 (size=139) 2024-12-05T00:26:39,263 INFO [M:0;2113c16e5528:46237 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-05T00:26:39,263 INFO [M:0;2113c16e5528:46237 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-05T00:26:39,263 DEBUG [M:0;2113c16e5528:46237 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T00:26:39,263 INFO [M:0;2113c16e5528:46237 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:26:39,263 DEBUG [M:0;2113c16e5528:46237 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:26:39,263 DEBUG [M:0;2113c16e5528:46237 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T00:26:39,263 DEBUG [M:0;2113c16e5528:46237 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:26:39,263 INFO [M:0;2113c16e5528:46237 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB 2024-12-05T00:26:39,264 ERROR [FSHLog-0-hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData-prefix:2113c16e5528,46237,1733358369470 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32793,DS-9be2a434-390b-4c40-8bf6-634cd8e7bb81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:39,264 WARN [FSHLog-0-hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData-prefix:2113c16e5528,46237,1733358369470 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32793,DS-9be2a434-390b-4c40-8bf6-634cd8e7bb81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:39,264 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 2113c16e5528%2C46237%2C1733358369470:(num 1733358369614) roll requested 2024-12-05T00:26:39,264 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C46237%2C1733358369470.1733358399264 2024-12-05T00:26:39,268 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:39,268 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:39,268 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:39,268 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:39,269 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:39,269 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/WALs/2113c16e5528,46237,1733358369470/2113c16e5528%2C46237%2C1733358369470.1733358369614 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/WALs/2113c16e5528,46237,1733358369470/2113c16e5528%2C46237%2C1733358369470.1733358399264 2024-12-05T00:26:39,269 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32793,DS-9be2a434-390b-4c40-8bf6-634cd8e7bb81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:39,269 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32793,DS-9be2a434-390b-4c40-8bf6-634cd8e7bb81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-05T00:26:39,269 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/WALs/2113c16e5528,46237,1733358369470/2113c16e5528%2C46237%2C1733358369470.1733358369614 2024-12-05T00:26:39,269 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35015:35015),(127.0.0.1/127.0.0.1:43539:43539)] 2024-12-05T00:26:39,269 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/WALs/2113c16e5528,46237,1733358369470/2113c16e5528%2C46237%2C1733358369470.1733358369614 is not closed yet, will try archiving it next time 2024-12-05T00:26:39,269 WARN [IPC Server handler 1 on default port 39785 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/WALs/2113c16e5528,46237,1733358369470/2113c16e5528%2C46237%2C1733358369470.1733358369614 has not been closed. Lease recovery is in progress. 
RecoveryId = 1032 for block blk_1073741830_1013 2024-12-05T00:26:39,270 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/WALs/2113c16e5528,46237,1733358369470/2113c16e5528%2C46237%2C1733358369470.1733358369614 after 1ms 2024-12-05T00:26:39,284 DEBUG [M:0;2113c16e5528:46237 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0b540600d91e4998b44fff3fd6ed8c63 is 82, key is hbase:meta,,1/info:regioninfo/1733358370326/Put/seqid=0 2024-12-05T00:26:39,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741848_1033 (size=5672) 2024-12-05T00:26:39,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41723 is added to blk_1073741848_1033 (size=5672) 2024-12-05T00:26:39,289 INFO [M:0;2113c16e5528:46237 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0b540600d91e4998b44fff3fd6ed8c63 2024-12-05T00:26:39,307 DEBUG [M:0;2113c16e5528:46237 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/12a967dd82e7455eae11189da0159131 is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733358370832/Put/seqid=0 2024-12-05T00:26:39,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741849_1034 (size=6118) 2024-12-05T00:26:39,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41723 is added to blk_1073741849_1034 (size=6118) 2024-12-05T00:26:39,312 INFO [M:0;2113c16e5528:46237 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/12a967dd82e7455eae11189da0159131 2024-12-05T00:26:39,331 DEBUG [M:0;2113c16e5528:46237 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6b3c114aa43f437cbb6ebd594a5154dd is 69, key is 2113c16e5528,34229,1733358369517/rs:state/1733358369763/Put/seqid=0 2024-12-05T00:26:39,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41723 is added to blk_1073741850_1035 (size=5156) 2024-12-05T00:26:39,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741850_1035 (size=5156) 2024-12-05T00:26:39,336 INFO [M:0;2113c16e5528:46237 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6b3c114aa43f437cbb6ebd594a5154dd 2024-12-05T00:26:39,352 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34229-0x1018003a53f0001, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:26:39,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34229-0x1018003a53f0001, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:26:39,353 INFO [RS:0;2113c16e5528:34229 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T00:26:39,353 INFO [RS:0;2113c16e5528:34229 {}] regionserver.HRegionServer(1031): Exiting; stopping=2113c16e5528,34229,1733358369517; zookeeper connection closed. 2024-12-05T00:26:39,353 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@26516e4e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@26516e4e 2024-12-05T00:26:39,353 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-05T00:26:39,355 DEBUG [M:0;2113c16e5528:46237 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/374d51c9d38e4d809e61f1f02ec96571 is 52, key is load_balancer_on/state:d/1733358370456/Put/seqid=0 2024-12-05T00:26:39,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741851_1036 (size=5056) 2024-12-05T00:26:39,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41723 is added to blk_1073741851_1036 (size=5056) 2024-12-05T00:26:39,360 INFO [M:0;2113c16e5528:46237 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/374d51c9d38e4d809e61f1f02ec96571 2024-12-05T00:26:39,364 DEBUG [M:0;2113c16e5528:46237 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0b540600d91e4998b44fff3fd6ed8c63 as hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0b540600d91e4998b44fff3fd6ed8c63 2024-12-05T00:26:39,369 INFO [M:0;2113c16e5528:46237 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0b540600d91e4998b44fff3fd6ed8c63, entries=8, sequenceid=56, filesize=5.5 K 2024-12-05T00:26:39,369 DEBUG [M:0;2113c16e5528:46237 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/12a967dd82e7455eae11189da0159131 as 
hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/12a967dd82e7455eae11189da0159131 2024-12-05T00:26:39,374 INFO [M:0;2113c16e5528:46237 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/12a967dd82e7455eae11189da0159131, entries=6, sequenceid=56, filesize=6.0 K 2024-12-05T00:26:39,375 DEBUG [M:0;2113c16e5528:46237 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6b3c114aa43f437cbb6ebd594a5154dd as hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6b3c114aa43f437cbb6ebd594a5154dd 2024-12-05T00:26:39,379 INFO [M:0;2113c16e5528:46237 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6b3c114aa43f437cbb6ebd594a5154dd, entries=1, sequenceid=56, filesize=5.0 K 2024-12-05T00:26:39,380 DEBUG [M:0;2113c16e5528:46237 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/374d51c9d38e4d809e61f1f02ec96571 as hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/374d51c9d38e4d809e61f1f02ec96571 2024-12-05T00:26:39,385 INFO [M:0;2113c16e5528:46237 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/374d51c9d38e4d809e61f1f02ec96571, entries=1, sequenceid=56, filesize=4.9 K 2024-12-05T00:26:39,386 INFO [M:0;2113c16e5528:46237 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 123ms, sequenceid=56, compaction requested=false 2024-12-05T00:26:39,387 INFO [M:0;2113c16e5528:46237 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:26:39,388 DEBUG [M:0;2113c16e5528:46237 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733358399263Disabling compacts and flushes for region at 1733358399263Disabling writes for close at 1733358399263Obtaining lock to block concurrent updates at 1733358399263Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733358399263Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1733358399264 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733358399270 (+6 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733358399270Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733358399284 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733358399284Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733358399294 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733358399307 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733358399307Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733358399317 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733358399331 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733358399331Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733358399341 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733358399355 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733358399355Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3af8f36: reopening flushed file at 1733358399364 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4419198d: reopening flushed file at 1733358399369 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5fcf51c4: reopening flushed file at 1733358399374 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2465b8ed: reopening flushed file at 1733358399379 (+5 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 123ms, sequenceid=56, compaction requested=false at 1733358399386 (+7 ms)Writing region close event to WAL at 1733358399387 (+1 ms)Closed at 1733358399387 2024-12-05T00:26:39,388 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:39,388 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:39,388 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:39,388 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:39,388 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:26:39,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41723 is added to blk_1073741847_1031 (size=757) 2024-12-05T00:26:39,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741847_1031 (size=757) 2024-12-05T00:26:39,454 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-05T00:26:40,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:40,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:40,395 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:40,395 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:40,409 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:40,409 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:40,409 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:40,409 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:40,409 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:40,410 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:40,412 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:40,413 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:40,413 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:40,415 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:40,418 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:40,418 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:40,920 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-05T00:26:40,921 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:40,921 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:40,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:40,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:40,938 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:40,938 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:40,938 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:40,938 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:40,939 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:40,939 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:40,942 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:40,942 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:40,943 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:40,945 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:41,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:41,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:42,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:42,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:42,255 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-12-05T00:26:43,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:43,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:43,270 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/WALs/2113c16e5528,46237,1733358369470/2113c16e5528%2C46237%2C1733358369470.1733358369614 after 4001ms 2024-12-05T00:26:43,271 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/WALs/2113c16e5528,46237,1733358369470/2113c16e5528%2C46237%2C1733358369470.1733358369614 to hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/oldWALs/2113c16e5528%2C46237%2C1733358369470.1733358369614 2024-12-05T00:26:43,274 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/MasterData/oldWALs/2113c16e5528%2C46237%2C1733358369470.1733358369614 to hdfs://localhost:39785/user/jenkins/test-data/8e362bed-ad9f-3335-90a0-9a3552455a83/oldWALs/2113c16e5528%2C46237%2C1733358369470.1733358369614$masterlocalwal$ 2024-12-05T00:26:43,274 INFO [M:0;2113c16e5528:46237 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-05T00:26:43,274 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
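The "Recovered lease, attempt=1 ... after 4001ms" entry above is RecoverLeaseFSUtils finally reclaiming the HDFS lease on the old master WAL so it can be archived to oldWALs. A rough sketch of that recover-and-poll loop against the public DistributedFileSystem API follows; the helper name, timeout and pause values are illustrative assumptions, not HBase's actual implementation (which, as the stack traces above show, also goes through reflection-based compatibility checks).

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

final class WalLeaseRecoverySketch {
  // Repeatedly ask the NameNode to recover the lease on an old WAL file and
  // poll isFileClosed() until the last block is finalized or the timeout expires.
  static boolean recoverLease(DistributedFileSystem dfs, Path wal,
      long timeoutMs, long pauseMs) throws Exception {
    long start = System.currentTimeMillis();
    for (int attempt = 0; System.currentTimeMillis() - start < timeoutMs; attempt++) {
      // recoverLease() returns true once the file is closed on the NameNode.
      if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
        System.out.println("Recovered lease, attempt=" + attempt + " on file=" + wal);
        return true;
      }
      Thread.sleep(pauseMs); // roughly the gap between attempt=0 and attempt=1 above
    }
    return false;
  }
}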
2024-12-05T00:26:43,274 INFO [M:0;2113c16e5528:46237 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46237 2024-12-05T00:26:43,274 INFO [M:0;2113c16e5528:46237 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T00:26:43,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:26:43,376 INFO [M:0;2113c16e5528:46237 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T00:26:43,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46237-0x1018003a53f0000, quorum=127.0.0.1:51550, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:26:43,379 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5aaed393{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:26:43,379 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7939cb3e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:26:43,379 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:26:43,379 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49f94f8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:26:43,379 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ae00177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/hadoop.log.dir/,STOPPED} 2024-12-05T00:26:43,380 WARN [BP-309723592-172.17.0.2-1733358368776 heartbeating to localhost/127.0.0.1:39785 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T00:26:43,380 WARN [BP-309723592-172.17.0.2-1733358368776 heartbeating to localhost/127.0.0.1:39785 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-309723592-172.17.0.2-1733358368776 (Datanode Uuid 27bbf400-a92b-4219-99da-be3681e7f296) service to localhost/127.0.0.1:39785 2024-12-05T00:26:43,380 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T00:26:43,380 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T00:26:43,381 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/cluster_fb2be59a-c7c1-a812-1afa-4bf3219bf7b4/data/data3/current/BP-309723592-172.17.0.2-1733358368776 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:26:43,381 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/cluster_fb2be59a-c7c1-a812-1afa-4bf3219bf7b4/data/data4/current/BP-309723592-172.17.0.2-1733358368776 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:26:43,381 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T00:26:43,383 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2b944a8d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:26:43,384 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3d61cf28{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:26:43,384 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:26:43,384 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3f8a0d0d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:26:43,384 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@71e80301{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/hadoop.log.dir/,STOPPED} 2024-12-05T00:26:43,385 WARN [BP-309723592-172.17.0.2-1733358368776 heartbeating to localhost/127.0.0.1:39785 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T00:26:43,385 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T00:26:43,385 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T00:26:43,385 WARN [BP-309723592-172.17.0.2-1733358368776 heartbeating to localhost/127.0.0.1:39785 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-309723592-172.17.0.2-1733358368776 (Datanode Uuid cf735d57-64b3-4247-acfb-7d0dd400132b) service to localhost/127.0.0.1:39785 2024-12-05T00:26:43,386 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/cluster_fb2be59a-c7c1-a812-1afa-4bf3219bf7b4/data/data1/current/BP-309723592-172.17.0.2-1733358368776 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:26:43,386 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/cluster_fb2be59a-c7c1-a812-1afa-4bf3219bf7b4/data/data2/current/BP-309723592-172.17.0.2-1733358368776 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:26:43,386 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T00:26:43,391 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@347a2271{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-05T00:26:43,392 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@464ea64c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:26:43,392 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:26:43,392 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@662aecf5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:26:43,392 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@27a49013{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/hadoop.log.dir/,STOPPED} 2024-12-05T00:26:43,398 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-05T00:26:43,414 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-05T00:26:43,423 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 153) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:39785 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39785 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39785 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39785 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39785 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39785 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39785 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39785 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 452) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=114 (was 176), ProcessCount=11 (was 11), AvailableMemoryMB=8695 (was 8866) 2024-12-05T00:26:43,430 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=114, ProcessCount=11, AvailableMemoryMB=8695 2024-12-05T00:26:43,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-05T00:26:43,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/hadoop.log.dir so I do NOT create it in target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf 2024-12-05T00:26:43,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d645b8b8-8a15-95c6-84d0-ba5f777cf379/hadoop.tmp.dir so I do NOT create it in target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf 2024-12-05T00:26:43,430 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/cluster_47ea64d0-2ea6-3900-1f9e-b12b87839e9f, deleteOnExit=true 2024-12-05T00:26:43,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-05T00:26:43,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/test.cache.data in system properties and HBase conf 2024-12-05T00:26:43,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/hadoop.tmp.dir in system properties and HBase conf 2024-12-05T00:26:43,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/hadoop.log.dir in system properties and HBase conf 2024-12-05T00:26:43,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-05T00:26:43,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-05T00:26:43,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-05T00:26:43,431 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file 
system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-05T00:26:43,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-05T00:26:43,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-05T00:26:43,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-05T00:26:43,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T00:26:43,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-05T00:26:43,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-05T00:26:43,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T00:26:43,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T00:26:43,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-05T00:26:43,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/nfs.dump.dir in system properties and HBase conf 2024-12-05T00:26:43,432 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/java.io.tmpdir in system properties and HBase conf 2024-12-05T00:26:43,432 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T00:26:43,432 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-05T00:26:43,432 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-05T00:26:43,445 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-05T00:26:43,516 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:26:43,520 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:26:43,521 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:26:43,521 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:26:43,521 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T00:26:43,522 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:26:43,522 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62e6de45{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:26:43,522 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@63f38dfa{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:26:43,637 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@701842fe{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/java.io.tmpdir/jetty-localhost-35091-hadoop-hdfs-3_4_1-tests_jar-_-any-541331015801836760/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-05T00:26:43,638 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@785f0d23{HTTP/1.1, (http/1.1)}{localhost:35091} 2024-12-05T00:26:43,638 INFO [Time-limited test {}] server.Server(415): Started @184265ms 2024-12-05T00:26:43,651 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-05T00:26:43,705 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:26:43,708 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:26:43,708 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:26:43,708 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:26:43,709 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-05T00:26:43,709 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6eebb3dc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:26:43,709 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5a296252{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:26:43,823 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2ac0122b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/java.io.tmpdir/jetty-localhost-46327-hadoop-hdfs-3_4_1-tests_jar-_-any-9180571079688174426/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:26:43,823 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@648e3649{HTTP/1.1, (http/1.1)}{localhost:46327} 2024-12-05T00:26:43,824 INFO [Time-limited test {}] server.Server(415): Started @184451ms 2024-12-05T00:26:43,825 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T00:26:43,855 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:26:43,858 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:26:43,858 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:26:43,858 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:26:43,858 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-05T00:26:43,859 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46a495b0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:26:43,859 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4cf5e3df{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:26:43,919 WARN [Thread-1633 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/cluster_47ea64d0-2ea6-3900-1f9e-b12b87839e9f/data/data2/current/BP-1984325545-172.17.0.2-1733358403462/current, will proceed with Du for space computation calculation, 2024-12-05T00:26:43,919 WARN [Thread-1632 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/cluster_47ea64d0-2ea6-3900-1f9e-b12b87839e9f/data/data1/current/BP-1984325545-172.17.0.2-1733358403462/current, will proceed with Du for space computation calculation, 2024-12-05T00:26:43,935 WARN [Thread-1611 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-05T00:26:43,938 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x16cbe75e786900b8 with lease ID 0x80ae8adca224bcda: Processing first storage report for DS-94ee825c-1a74-4f58-925e-0152c5500861 from datanode DatanodeRegistration(127.0.0.1:45511, datanodeUuid=0dfe5e03-f207-4f58-b680-945127692ab6, infoPort=41715, infoSecurePort=0, ipcPort=39371, storageInfo=lv=-57;cid=testClusterID;nsid=71585852;c=1733358403462) 2024-12-05T00:26:43,938 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x16cbe75e786900b8 with lease ID 0x80ae8adca224bcda: from storage DS-94ee825c-1a74-4f58-925e-0152c5500861 node DatanodeRegistration(127.0.0.1:45511, datanodeUuid=0dfe5e03-f207-4f58-b680-945127692ab6, infoPort=41715, infoSecurePort=0, ipcPort=39371, storageInfo=lv=-57;cid=testClusterID;nsid=71585852;c=1733358403462), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:26:43,938 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x16cbe75e786900b8 with lease ID 0x80ae8adca224bcda: Processing first storage report for DS-23ebaa94-1cea-45cb-911a-d763bfcb9d8d from datanode DatanodeRegistration(127.0.0.1:45511, datanodeUuid=0dfe5e03-f207-4f58-b680-945127692ab6, infoPort=41715, infoSecurePort=0, ipcPort=39371, storageInfo=lv=-57;cid=testClusterID;nsid=71585852;c=1733358403462) 2024-12-05T00:26:43,938 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x16cbe75e786900b8 with lease ID 0x80ae8adca224bcda: from storage DS-23ebaa94-1cea-45cb-911a-d763bfcb9d8d node DatanodeRegistration(127.0.0.1:45511, datanodeUuid=0dfe5e03-f207-4f58-b680-945127692ab6, infoPort=41715, infoSecurePort=0, ipcPort=39371, storageInfo=lv=-57;cid=testClusterID;nsid=71585852;c=1733358403462), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:26:43,975 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@39212263{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/java.io.tmpdir/jetty-localhost-33279-hadoop-hdfs-3_4_1-tests_jar-_-any-9600947739975033893/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:26:43,976 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@10128232{HTTP/1.1, (http/1.1)}{localhost:33279} 2024-12-05T00:26:43,976 INFO [Time-limited test {}] server.Server(415): Started @184603ms 2024-12-05T00:26:43,977 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
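[Editor's note] The records above and below are the per-test setup phase: HBaseTestingUtil points hadoop.log.dir/hadoop.tmp.dir and friends at a fresh test-data directory, then brings up a MiniDFSCluster, a MiniZooKeeperCluster and an HBase minicluster before the next TestLogRolling method. As a rough sketch of how a JUnit harness drives this, assuming the org.apache.hadoop.hbase.HBaseTestingUtil class named in the log and the startMiniCluster()/shutdownMiniCluster() entry points its messages imply (the test class itself is hypothetical, not TestLogRolling):

    import org.apache.hadoop.hbase.HBaseTestingUtil;   // branch-3 naming, per the log
    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.junit.Test;

    public class MiniClusterHarnessSketch {            // hypothetical test class
        private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

        @BeforeClass
        public static void setUp() throws Exception {
            // Produces records like "Starting up minicluster with option: StartMiniClusterOption{...}".
            TEST_UTIL.startMiniCluster();
        }

        @AfterClass
        public static void tearDown() throws Exception {
            // Produces records like "Shutdown MiniZK cluster ..." and "Minicluster is down".
            TEST_UTIL.shutdownMiniCluster();
        }

        @Test
        public void testAgainstTheMiniCluster() {
            // placeholder: the real TestLogRolling methods roll WALs and restart the DFS pipeline
        }
    }

The ResourceChecker before/after lines bracketing each test (thread count, open file descriptors, load average, free memory) are what flag the "Potentially hanging thread" dump seen earlier when counts grow between methods.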
2024-12-05T00:26:44,077 WARN [Thread-1658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/cluster_47ea64d0-2ea6-3900-1f9e-b12b87839e9f/data/data3/current/BP-1984325545-172.17.0.2-1733358403462/current, will proceed with Du for space computation calculation, 2024-12-05T00:26:44,077 WARN [Thread-1659 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/cluster_47ea64d0-2ea6-3900-1f9e-b12b87839e9f/data/data4/current/BP-1984325545-172.17.0.2-1733358403462/current, will proceed with Du for space computation calculation, 2024-12-05T00:26:44,094 WARN [Thread-1647 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-05T00:26:44,096 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9d17d1b5ad59f3e3 with lease ID 0x80ae8adca224bcdb: Processing first storage report for DS-aa9697b8-6be6-4dd0-bfb3-4e602336b91c from datanode DatanodeRegistration(127.0.0.1:35063, datanodeUuid=2fb8046e-f161-4150-a543-876646e66d50, infoPort=38897, infoSecurePort=0, ipcPort=43599, storageInfo=lv=-57;cid=testClusterID;nsid=71585852;c=1733358403462) 2024-12-05T00:26:44,096 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9d17d1b5ad59f3e3 with lease ID 0x80ae8adca224bcdb: from storage DS-aa9697b8-6be6-4dd0-bfb3-4e602336b91c node DatanodeRegistration(127.0.0.1:35063, datanodeUuid=2fb8046e-f161-4150-a543-876646e66d50, infoPort=38897, infoSecurePort=0, ipcPort=43599, storageInfo=lv=-57;cid=testClusterID;nsid=71585852;c=1733358403462), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:26:44,096 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9d17d1b5ad59f3e3 with lease ID 0x80ae8adca224bcdb: Processing first storage report for DS-1bc02238-b567-4efc-9926-094e66c7363d from datanode DatanodeRegistration(127.0.0.1:35063, datanodeUuid=2fb8046e-f161-4150-a543-876646e66d50, infoPort=38897, infoSecurePort=0, ipcPort=43599, storageInfo=lv=-57;cid=testClusterID;nsid=71585852;c=1733358403462) 2024-12-05T00:26:44,096 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9d17d1b5ad59f3e3 with lease ID 0x80ae8adca224bcdb: from storage DS-1bc02238-b567-4efc-9926-094e66c7363d node DatanodeRegistration(127.0.0.1:35063, datanodeUuid=2fb8046e-f161-4150-a543-876646e66d50, infoPort=38897, infoSecurePort=0, ipcPort=43599, storageInfo=lv=-57;cid=testClusterID;nsid=71585852;c=1733358403462), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:26:44,100 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf 2024-12-05T00:26:44,102 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/cluster_47ea64d0-2ea6-3900-1f9e-b12b87839e9f/zookeeper_0, clientPort=51612, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/cluster_47ea64d0-2ea6-3900-1f9e-b12b87839e9f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/cluster_47ea64d0-2ea6-3900-1f9e-b12b87839e9f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-05T00:26:44,103 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51612 2024-12-05T00:26:44,103 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:26:44,104 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:26:44,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35063 is added to blk_1073741825_1001 (size=7) 2024-12-05T00:26:44,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741825_1001 (size=7) 2024-12-05T00:26:44,113 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1 with version=8 2024-12-05T00:26:44,113 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/hbase-staging 2024-12-05T00:26:44,115 INFO [Time-limited test {}] client.ConnectionUtils(128): master/2113c16e5528:0 server-side Connection retries=45 2024-12-05T00:26:44,115 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:26:44,115 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T00:26:44,115 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T00:26:44,115 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:26:44,115 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T00:26:44,116 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-05T00:26:44,116 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T00:26:44,116 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38279 2024-12-05T00:26:44,117 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38279 connecting to ZooKeeper ensemble=127.0.0.1:51612 2024-12-05T00:26:44,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:382790x0, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T00:26:44,125 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38279-0x10180042c950000 connected 2024-12-05T00:26:44,140 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:26:44,141 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:26:44,143 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:26:44,143 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1, hbase.cluster.distributed=false 2024-12-05T00:26:44,144 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T00:26:44,145 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38279 2024-12-05T00:26:44,145 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38279 2024-12-05T00:26:44,145 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38279 2024-12-05T00:26:44,145 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38279 2024-12-05T00:26:44,146 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38279 2024-12-05T00:26:44,161 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2113c16e5528:0 server-side Connection retries=45 2024-12-05T00:26:44,161 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:26:44,161 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T00:26:44,161 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T00:26:44,161 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:26:44,161 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T00:26:44,161 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T00:26:44,161 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T00:26:44,164 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38997 2024-12-05T00:26:44,165 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38997 connecting to ZooKeeper ensemble=127.0.0.1:51612 2024-12-05T00:26:44,166 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:26:44,167 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:26:44,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:389970x0, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T00:26:44,171 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:389970x0, quorum=127.0.0.1:51612, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:26:44,171 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38997-0x10180042c950001 connected 2024-12-05T00:26:44,171 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-05T00:26:44,172 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T00:26:44,173 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38997-0x10180042c950001, quorum=127.0.0.1:51612, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T00:26:44,173 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38997-0x10180042c950001, quorum=127.0.0.1:51612, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T00:26:44,174 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38997 2024-12-05T00:26:44,174 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38997 2024-12-05T00:26:44,174 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38997 2024-12-05T00:26:44,175 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38997 2024-12-05T00:26:44,175 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38997 2024-12-05T00:26:44,186 
DEBUG [M:0;2113c16e5528:38279 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;2113c16e5528:38279 2024-12-05T00:26:44,186 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/2113c16e5528,38279,1733358404115 2024-12-05T00:26:44,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:26:44,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38997-0x10180042c950001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:26:44,188 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/2113c16e5528,38279,1733358404115 2024-12-05T00:26:44,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38997-0x10180042c950001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T00:26:44,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:44,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38997-0x10180042c950001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:44,191 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-05T00:26:44,192 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/2113c16e5528,38279,1733358404115 from backup master directory 2024-12-05T00:26:44,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38997-0x10180042c950001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:26:44,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/2113c16e5528,38279,1733358404115 2024-12-05T00:26:44,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:26:44,193 WARN [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
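[Editor's note] The ZKWatcher/ZKUtil DEBUG records around here follow the standard ZooKeeper idiom of setting a watch on a znode that may not exist yet: exists() registers the watch whether or not the node is present, so the later NodeCreated/NodeDeleted events for /hbase/master and /hbase/backup-masters wake the waiting master and region server. A small self-contained sketch of that idiom using the plain ZooKeeper client API (not HBase's ZKUtil) is below; the class name is hypothetical.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public final class MasterZNodeWatchSketch {        // hypothetical class name
        // exists() sets the watch even when the znode is absent, so a later
        // NodeCreated/NodeDeleted event (as in the log) notifies the caller.
        static void watchMasterZNode(ZooKeeper zk) throws Exception {
            Watcher watcher = (WatchedEvent event) ->
                System.out.println("Received ZooKeeper Event, type=" + event.getType()
                    + ", path=" + event.getPath());
            Stat stat = zk.exists("/hbase/master", watcher);
            System.out.println(stat == null
                ? "Set watcher on znode that does not yet exist, /hbase/master"
                : "Set watcher on existing znode=/hbase/master");
        }
    }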
2024-12-05T00:26:44,193 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=2113c16e5528,38279,1733358404115 2024-12-05T00:26:44,197 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/hbase.id] with ID: 4e441e5d-d3ba-44cd-867a-00bf8a39c6a4 2024-12-05T00:26:44,197 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/.tmp/hbase.id 2024-12-05T00:26:44,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-05T00:26:44,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741826_1002 (size=42) 2024-12-05T00:26:44,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35063 is added to blk_1073741826_1002 (size=42) 2024-12-05T00:26:44,203 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/.tmp/hbase.id]:[hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/hbase.id] 2024-12-05T00:26:44,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-05T00:26:44,213 INFO [master/2113c16e5528:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:26:44,213 INFO [master/2113c16e5528:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-05T00:26:44,214 INFO [master/2113c16e5528:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms.
2024-12-05T00:26:44,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38997-0x10180042c950001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:44,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:44,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35063 is added to blk_1073741827_1003 (size=196) 2024-12-05T00:26:44,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741827_1003 (size=196) 2024-12-05T00:26:44,223 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T00:26:44,223 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-05T00:26:44,224 INFO [master/2113c16e5528:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T00:26:44,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741828_1004 (size=1189) 2024-12-05T00:26:44,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35063 is added to blk_1073741828_1004 (size=1189) 2024-12-05T00:26:44,234 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/MasterData/data/master/store 2024-12-05T00:26:44,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741829_1005 (size=34) 2024-12-05T00:26:44,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35063 is added to blk_1073741829_1005 (size=34) 2024-12-05T00:26:44,240 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:26:44,241 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T00:26:44,241 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:26:44,241 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:26:44,241 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T00:26:44,241 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:26:44,241 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-05T00:26:44,241 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733358404241Disabling compacts and flushes for region at 1733358404241Disabling writes for close at 1733358404241Writing region close event to WAL at 1733358404241Closed at 1733358404241 2024-12-05T00:26:44,241 WARN [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/MasterData/data/master/store/.initializing 2024-12-05T00:26:44,242 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/MasterData/WALs/2113c16e5528,38279,1733358404115 2024-12-05T00:26:44,244 INFO [master/2113c16e5528:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2113c16e5528%2C38279%2C1733358404115, suffix=, logDir=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/MasterData/WALs/2113c16e5528,38279,1733358404115, archiveDir=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/MasterData/oldWALs, maxLogs=10 2024-12-05T00:26:44,244 INFO [master/2113c16e5528:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C38279%2C1733358404115.1733358404244 2024-12-05T00:26:44,249 INFO [master/2113c16e5528:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/MasterData/WALs/2113c16e5528,38279,1733358404115/2113c16e5528%2C38279%2C1733358404115.1733358404244 2024-12-05T00:26:44,252 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38897:38897),(127.0.0.1/127.0.0.1:41715:41715)] 2024-12-05T00:26:44,253 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-05T00:26:44,253 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:26:44,253 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:26:44,253 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:26:44,255 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:26:44,256 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-05T00:26:44,256 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:26:44,256 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:26:44,256 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:26:44,257 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-05T00:26:44,257 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:26:44,258 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:26:44,258 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:26:44,259 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-05T00:26:44,259 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:26:44,259 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:26:44,259 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:26:44,260 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-05T00:26:44,260 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:26:44,261 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:26:44,261 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:26:44,262 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:26:44,262 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:26:44,263 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:26:44,263 DEBUG [master/2113c16e5528:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:26:44,264 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T00:26:44,266 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:26:44,268 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T00:26:44,268 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=799943, jitterRate=0.01718124747276306}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T00:26:44,269 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733358404253Initializing all the Stores at 1733358404254 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358404254Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358404254Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358404254Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358404254Cleaning up temporary data from old regions at 1733358404263 (+9 ms)Region opened successfully at 1733358404269 (+6 ms) 2024-12-05T00:26:44,269 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-05T00:26:44,273 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60ee0e63, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2113c16e5528/172.17.0.2:0 2024-12-05T00:26:44,274 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-05T00:26:44,274 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-05T00:26:44,274 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-05T00:26:44,274 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-05T00:26:44,275 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-05T00:26:44,275 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-05T00:26:44,275 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-05T00:26:44,277 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-05T00:26:44,278 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-05T00:26:44,279 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-05T00:26:44,280 INFO [master/2113c16e5528:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-05T00:26:44,280 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-05T00:26:44,283 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-05T00:26:44,283 INFO [master/2113c16e5528:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-05T00:26:44,284 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-05T00:26:44,285 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-05T00:26:44,286 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-05T00:26:44,287 DEBUG 
[master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-05T00:26:44,288 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-05T00:26:44,289 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-05T00:26:44,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38997-0x10180042c950001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T00:26:44,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T00:26:44,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38997-0x10180042c950001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:44,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:44,292 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=2113c16e5528,38279,1733358404115, sessionid=0x10180042c950000, setting cluster-up flag (Was=false) 2024-12-05T00:26:44,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:44,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38997-0x10180042c950001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:44,300 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-05T00:26:44,301 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2113c16e5528,38279,1733358404115 2024-12-05T00:26:44,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38997-0x10180042c950001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:44,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:44,309 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-05T00:26:44,309 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2113c16e5528,38279,1733358404115 2024-12-05T00:26:44,310 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-05T00:26:44,312 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-05T00:26:44,312 INFO [master/2113c16e5528:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-05T00:26:44,312 INFO [master/2113c16e5528:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-05T00:26:44,313 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 2113c16e5528,38279,1733358404115 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-05T00:26:44,314 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:26:44,314 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:26:44,314 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:26:44,314 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:26:44,314 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/2113c16e5528:0, corePoolSize=10, maxPoolSize=10 2024-12-05T00:26:44,314 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:26:44,314 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/2113c16e5528:0, corePoolSize=2, maxPoolSize=2 2024-12-05T00:26:44,314 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/2113c16e5528:0, corePoolSize=1, 
maxPoolSize=1 2024-12-05T00:26:44,315 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733358434315 2024-12-05T00:26:44,316 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-05T00:26:44,316 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-05T00:26:44,316 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-05T00:26:44,316 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-05T00:26:44,316 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-05T00:26:44,316 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-05T00:26:44,316 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:44,316 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:26:44,316 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-05T00:26:44,316 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-05T00:26:44,316 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-05T00:26:44,316 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-05T00:26:44,317 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:26:44,317 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-05T00:26:44,319 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-05T00:26:44,319 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-05T00:26:44,320 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.large.0-1733358404319,5,FailOnTimeoutGroup] 2024-12-05T00:26:44,320 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.small.0-1733358404320,5,FailOnTimeoutGroup] 2024-12-05T00:26:44,320 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:44,320 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-05T00:26:44,320 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:44,320 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-05T00:26:44,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741831_1007 (size=1321) 2024-12-05T00:26:44,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35063 is added to blk_1073741831_1007 (size=1321) 2024-12-05T00:26:44,325 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-05T00:26:44,325 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1 2024-12-05T00:26:44,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35063 is added to blk_1073741832_1008 (size=32) 2024-12-05T00:26:44,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741832_1008 (size=32) 2024-12-05T00:26:44,332 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:26:44,333 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T00:26:44,334 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T00:26:44,334 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:26:44,334 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:26:44,335 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T00:26:44,336 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T00:26:44,336 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:26:44,336 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:26:44,336 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T00:26:44,337 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T00:26:44,337 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:26:44,337 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:26:44,338 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T00:26:44,338 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T00:26:44,339 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:26:44,339 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:26:44,339 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T00:26:44,340 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/hbase/meta/1588230740 2024-12-05T00:26:44,340 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/hbase/meta/1588230740 2024-12-05T00:26:44,341 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T00:26:44,341 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T00:26:44,341 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-05T00:26:44,342 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T00:26:44,344 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T00:26:44,345 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=765592, jitterRate=-0.02649986743927002}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-05T00:26:44,345 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733358404332Initializing all the Stores at 1733358404332Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358404332Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358404332Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358404332Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358404332Cleaning up temporary data from old regions at 1733358404341 (+9 ms)Region opened successfully at 1733358404345 (+4 ms) 2024-12-05T00:26:44,345 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T00:26:44,345 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T00:26:44,345 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T00:26:44,345 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T00:26:44,345 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T00:26:44,346 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T00:26:44,346 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733358404345Disabling compacts and flushes for region at 1733358404345Disabling writes for close at 1733358404345Writing region close event 
to WAL at 1733358404346 (+1 ms)Closed at 1733358404346 2024-12-05T00:26:44,347 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:26:44,347 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-05T00:26:44,347 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-05T00:26:44,349 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T00:26:44,350 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-05T00:26:44,376 INFO [RS:0;2113c16e5528:38997 {}] regionserver.HRegionServer(746): ClusterId : 4e441e5d-d3ba-44cd-867a-00bf8a39c6a4 2024-12-05T00:26:44,376 DEBUG [RS:0;2113c16e5528:38997 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T00:26:44,378 DEBUG [RS:0;2113c16e5528:38997 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T00:26:44,378 DEBUG [RS:0;2113c16e5528:38997 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T00:26:44,381 DEBUG [RS:0;2113c16e5528:38997 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T00:26:44,382 DEBUG [RS:0;2113c16e5528:38997 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@261a3a0d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2113c16e5528/172.17.0.2:0 2024-12-05T00:26:44,393 DEBUG [RS:0;2113c16e5528:38997 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;2113c16e5528:38997 2024-12-05T00:26:44,393 INFO [RS:0;2113c16e5528:38997 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T00:26:44,393 INFO [RS:0;2113c16e5528:38997 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T00:26:44,393 DEBUG [RS:0;2113c16e5528:38997 {}] regionserver.HRegionServer(832): About to register with Master. 
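The CompactionConfiguration entries a little earlier report ratio 1.200000, minCompactSize 128 MB, and minFilesToCompact:3 / maxFilesToCompact:10 for every column family of region 1588230740. As a rough illustration only (a simplified sketch, not the actual ExploringCompactionPolicy source), the ratio test is commonly described as dropping a large leading candidate whose size exceeds both minCompactSize and ratio times the combined size of the files behind it:

    import java.util.List;

    public class RatioSelectionSketch {
        // Simplified ratio test: walking candidates oldest-first, drop a leading file while it is
        // bigger than max(minCompactSize, ratio * total size of everything after it).
        static List<Long> select(List<Long> sizes, double ratio, long minCompactSize) {
            int start = 0;
            long tailSum = sizes.stream().mapToLong(Long::longValue).sum();
            while (start < sizes.size()) {
                long current = sizes.get(start);
                tailSum -= current;
                if (current <= Math.max(minCompactSize, (long) (ratio * tailSum))) {
                    break;
                }
                start++;
            }
            return sizes.subList(start, sizes.size());
        }

        public static void main(String[] args) {
            // ratio 1.2 and minCompactSize 128 MB come from the CompactionConfiguration entries above.
            List<Long> sizes = List.of(400L << 20, 40L << 20, 30L << 20); // store file sizes in bytes
            System.out.println(select(sizes, 1.2, 128L << 20)); // the 400 MB file is left out
        }
    }

With minCompactSize at 128 MB, the small store files produced by this test always pass the size check, so selection effectively reduces to the minFilesToCompact:3 threshold.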
2024-12-05T00:26:44,394 INFO [RS:0;2113c16e5528:38997 {}] regionserver.HRegionServer(2659): reportForDuty to master=2113c16e5528,38279,1733358404115 with port=38997, startcode=1733358404160 2024-12-05T00:26:44,394 DEBUG [RS:0;2113c16e5528:38997 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T00:26:44,396 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36265, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T00:26:44,396 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38279 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2113c16e5528,38997,1733358404160 2024-12-05T00:26:44,396 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38279 {}] master.ServerManager(517): Registering regionserver=2113c16e5528,38997,1733358404160 2024-12-05T00:26:44,398 DEBUG [RS:0;2113c16e5528:38997 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1 2024-12-05T00:26:44,398 DEBUG [RS:0;2113c16e5528:38997 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34317 2024-12-05T00:26:44,398 DEBUG [RS:0;2113c16e5528:38997 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T00:26:44,399 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T00:26:44,400 DEBUG [RS:0;2113c16e5528:38997 {}] zookeeper.ZKUtil(111): regionserver:38997-0x10180042c950001, quorum=127.0.0.1:51612, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2113c16e5528,38997,1733358404160 2024-12-05T00:26:44,400 WARN [RS:0;2113c16e5528:38997 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T00:26:44,400 INFO [RS:0;2113c16e5528:38997 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T00:26:44,400 DEBUG [RS:0;2113c16e5528:38997 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/WALs/2113c16e5528,38997,1733358404160 2024-12-05T00:26:44,400 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2113c16e5528,38997,1733358404160] 2024-12-05T00:26:44,403 INFO [RS:0;2113c16e5528:38997 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T00:26:44,404 INFO [RS:0;2113c16e5528:38997 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T00:26:44,405 INFO [RS:0;2113c16e5528:38997 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T00:26:44,405 INFO [RS:0;2113c16e5528:38997 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
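The MemStoreFlusher line above gives globalMemStoreLimit=880 M and globalMemStoreLimitLowMark=836 M. Assuming the stock hbase.regionserver.global.memstore.size.lower.limit of 0.95 (an assumption, not stated in the log), the low-water mark is simply 95% of the global limit:

    public class MemStoreLowMarkSketch {
        public static void main(String[] args) {
            double globalMemStoreLimitMb = 880.0; // from the MemStoreFlusher entry above
            double lowerLimitFraction = 0.95;     // assumed default lower-limit fraction
            System.out.println(globalMemStoreLimitMb * lowerLimitFraction); // 836.0, the logged low mark
        }
    }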
2024-12-05T00:26:44,405 INFO [RS:0;2113c16e5528:38997 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T00:26:44,406 INFO [RS:0;2113c16e5528:38997 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T00:26:44,406 INFO [RS:0;2113c16e5528:38997 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:44,406 DEBUG [RS:0;2113c16e5528:38997 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:26:44,406 DEBUG [RS:0;2113c16e5528:38997 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:26:44,406 DEBUG [RS:0;2113c16e5528:38997 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:26:44,406 DEBUG [RS:0;2113c16e5528:38997 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:26:44,406 DEBUG [RS:0;2113c16e5528:38997 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:26:44,406 DEBUG [RS:0;2113c16e5528:38997 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2113c16e5528:0, corePoolSize=2, maxPoolSize=2 2024-12-05T00:26:44,406 DEBUG [RS:0;2113c16e5528:38997 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:26:44,406 DEBUG [RS:0;2113c16e5528:38997 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:26:44,406 DEBUG [RS:0;2113c16e5528:38997 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:26:44,406 DEBUG [RS:0;2113c16e5528:38997 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:26:44,406 DEBUG [RS:0;2113c16e5528:38997 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:26:44,406 DEBUG [RS:0;2113c16e5528:38997 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:26:44,406 DEBUG [RS:0;2113c16e5528:38997 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:26:44,406 DEBUG [RS:0;2113c16e5528:38997 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:26:44,408 INFO [RS:0;2113c16e5528:38997 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
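The CompactionChecker appears twice above in two notations: "runs every PT1S" (an ISO-8601 duration) and "period=1000, unit=MILLISECONDS" (the chore schedule). A one-line check that the two describe the same interval:

    import java.time.Duration;

    public class CompactionCheckerPeriodSketch {
        public static void main(String[] args) {
            // "PT1S" is the ISO-8601 spelling of one second, i.e. the 1000 ms chore period.
            System.out.println(Duration.parse("PT1S").toMillis()); // 1000
        }
    }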
2024-12-05T00:26:44,408 INFO [RS:0;2113c16e5528:38997 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:44,408 INFO [RS:0;2113c16e5528:38997 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:44,408 INFO [RS:0;2113c16e5528:38997 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:44,408 INFO [RS:0;2113c16e5528:38997 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:44,408 INFO [RS:0;2113c16e5528:38997 {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,38997,1733358404160-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T00:26:44,423 INFO [RS:0;2113c16e5528:38997 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T00:26:44,423 INFO [RS:0;2113c16e5528:38997 {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,38997,1733358404160-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:44,423 INFO [RS:0;2113c16e5528:38997 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:44,423 INFO [RS:0;2113c16e5528:38997 {}] regionserver.Replication(171): 2113c16e5528,38997,1733358404160 started 2024-12-05T00:26:44,438 INFO [RS:0;2113c16e5528:38997 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:44,438 INFO [RS:0;2113c16e5528:38997 {}] regionserver.HRegionServer(1482): Serving as 2113c16e5528,38997,1733358404160, RpcServer on 2113c16e5528/172.17.0.2:38997, sessionid=0x10180042c950001 2024-12-05T00:26:44,438 DEBUG [RS:0;2113c16e5528:38997 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T00:26:44,438 DEBUG [RS:0;2113c16e5528:38997 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2113c16e5528,38997,1733358404160 2024-12-05T00:26:44,438 DEBUG [RS:0;2113c16e5528:38997 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2113c16e5528,38997,1733358404160' 2024-12-05T00:26:44,438 DEBUG [RS:0;2113c16e5528:38997 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T00:26:44,439 DEBUG [RS:0;2113c16e5528:38997 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T00:26:44,439 DEBUG [RS:0;2113c16e5528:38997 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T00:26:44,439 DEBUG [RS:0;2113c16e5528:38997 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T00:26:44,439 DEBUG [RS:0;2113c16e5528:38997 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2113c16e5528,38997,1733358404160 2024-12-05T00:26:44,439 DEBUG [RS:0;2113c16e5528:38997 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2113c16e5528,38997,1733358404160' 2024-12-05T00:26:44,439 DEBUG [RS:0;2113c16e5528:38997 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T00:26:44,439 DEBUG 
[RS:0;2113c16e5528:38997 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T00:26:44,440 DEBUG [RS:0;2113c16e5528:38997 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T00:26:44,440 INFO [RS:0;2113c16e5528:38997 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T00:26:44,440 INFO [RS:0;2113c16e5528:38997 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T00:26:44,500 WARN [2113c16e5528:38279 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-05T00:26:44,542 INFO [RS:0;2113c16e5528:38997 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2113c16e5528%2C38997%2C1733358404160, suffix=, logDir=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/WALs/2113c16e5528,38997,1733358404160, archiveDir=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/oldWALs, maxLogs=32 2024-12-05T00:26:44,542 INFO [RS:0;2113c16e5528:38997 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C38997%2C1733358404160.1733358404542 2024-12-05T00:26:44,548 INFO [RS:0;2113c16e5528:38997 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/WALs/2113c16e5528,38997,1733358404160/2113c16e5528%2C38997%2C1733358404160.1733358404542 2024-12-05T00:26:44,549 DEBUG [RS:0;2113c16e5528:38997 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38897:38897),(127.0.0.1/127.0.0.1:41715:41715)] 2024-12-05T00:26:44,743 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T00:26:44,744 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-05T00:26:44,744 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-05T00:26:44,750 DEBUG [2113c16e5528:38279 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-05T00:26:44,751 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=2113c16e5528,38997,1733358404160 2024-12-05T00:26:44,752 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2113c16e5528,38997,1733358404160, state=OPENING 2024-12-05T00:26:44,753 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-05T00:26:44,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38997-0x10180042c950001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:26:44,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase 2024-12-05T00:26:44,755 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T00:26:44,756 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=2113c16e5528,38997,1733358404160}] 2024-12-05T00:26:44,756 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:26:44,756 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:26:44,909 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T00:26:44,911 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44735, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T00:26:44,914 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-05T00:26:44,914 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T00:26:44,916 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2113c16e5528%2C38997%2C1733358404160.meta, suffix=.meta, logDir=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/WALs/2113c16e5528,38997,1733358404160, archiveDir=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/oldWALs, maxLogs=32 2024-12-05T00:26:44,916 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C38997%2C1733358404160.meta.1733358404916.meta 2024-12-05T00:26:44,921 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/WALs/2113c16e5528,38997,1733358404160/2113c16e5528%2C38997%2C1733358404160.meta.1733358404916.meta 2024-12-05T00:26:44,923 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38897:38897),(127.0.0.1/127.0.0.1:41715:41715)] 2024-12-05T00:26:44,923 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-05T00:26:44,923 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-05T00:26:44,924 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): 
Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-05T00:26:44,924 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-05T00:26:44,924 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-05T00:26:44,924 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:26:44,924 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-05T00:26:44,924 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-05T00:26:44,925 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T00:26:44,926 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T00:26:44,926 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:26:44,926 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:26:44,926 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T00:26:44,927 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T00:26:44,927 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:26:44,927 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:26:44,928 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T00:26:44,928 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T00:26:44,928 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:26:44,929 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:26:44,929 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T00:26:44,929 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T00:26:44,929 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:26:44,929 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:26:44,930 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T00:26:44,930 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/hbase/meta/1588230740 2024-12-05T00:26:44,931 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/hbase/meta/1588230740 2024-12-05T00:26:44,932 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T00:26:44,932 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T00:26:44,933 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-05T00:26:44,934 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T00:26:44,934 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=797625, jitterRate=0.014233112335205078}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-05T00:26:44,934 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-05T00:26:44,935 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733358404924Writing region info on filesystem at 1733358404924Initializing all the Stores at 1733358404925 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358404925Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 
B (8KB)'} at 1733358404925Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358404925Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358404925Cleaning up temporary data from old regions at 1733358404932 (+7 ms)Running coprocessor post-open hooks at 1733358404935 (+3 ms)Region opened successfully at 1733358404935 2024-12-05T00:26:44,936 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733358404908 2024-12-05T00:26:44,939 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-05T00:26:44,939 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-05T00:26:44,940 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,38997,1733358404160 2024-12-05T00:26:44,941 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2113c16e5528,38997,1733358404160, state=OPEN 2024-12-05T00:26:44,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38997-0x10180042c950001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T00:26:44,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T00:26:44,948 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=2113c16e5528,38997,1733358404160 2024-12-05T00:26:44,948 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:26:44,948 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:26:44,951 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-05T00:26:44,951 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=2113c16e5528,38997,1733358404160 in 192 msec 2024-12-05T00:26:44,954 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure 
pid=2, resume processing ppid=1 2024-12-05T00:26:44,954 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 604 msec 2024-12-05T00:26:44,954 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:26:44,954 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-05T00:26:44,956 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T00:26:44,956 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,38997,1733358404160, seqNum=-1] 2024-12-05T00:26:44,956 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T00:26:44,958 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60509, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T00:26:44,963 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 650 msec 2024-12-05T00:26:44,963 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733358404963, completionTime=-1 2024-12-05T00:26:44,963 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-05T00:26:44,963 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-05T00:26:44,965 INFO [master/2113c16e5528:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-05T00:26:44,965 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733358464965 2024-12-05T00:26:44,965 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733358524965 2024-12-05T00:26:44,965 INFO [master/2113c16e5528:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-05T00:26:44,966 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,38279,1733358404115-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:44,966 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,38279,1733358404115-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:44,966 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,38279,1733358404115-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-05T00:26:44,966 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-2113c16e5528:38279, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:44,966 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:44,966 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-05T00:26:44,968 DEBUG [master/2113c16e5528:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-05T00:26:44,969 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.776sec 2024-12-05T00:26:44,970 INFO [master/2113c16e5528:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-05T00:26:44,970 INFO [master/2113c16e5528:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-05T00:26:44,970 INFO [master/2113c16e5528:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-05T00:26:44,970 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-05T00:26:44,970 INFO [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-05T00:26:44,970 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,38279,1733358404115-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T00:26:44,970 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,38279,1733358404115-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-05T00:26:44,972 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-05T00:26:44,972 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-05T00:26:44,972 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,38279,1733358404115-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-05T00:26:44,976 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a2777cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:26:44,976 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,38279,-1 for getting cluster id 2024-12-05T00:26:44,977 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T00:26:44,978 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4e441e5d-d3ba-44cd-867a-00bf8a39c6a4' 2024-12-05T00:26:44,979 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T00:26:44,979 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4e441e5d-d3ba-44cd-867a-00bf8a39c6a4" 2024-12-05T00:26:44,979 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@784ba96f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:26:44,979 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,38279,-1] 2024-12-05T00:26:44,979 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T00:26:44,979 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:26:44,980 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47926, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T00:26:44,981 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@555a4a92, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:26:44,982 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T00:26:44,983 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,38997,1733358404160, seqNum=-1] 2024-12-05T00:26:44,983 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T00:26:44,984 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51328, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T00:26:44,985 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=2113c16e5528,38279,1733358404115 2024-12-05T00:26:44,986 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:26:44,988 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-05T00:26:44,989 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-05T00:26:44,989 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 2113c16e5528,38279,1733358404115 2024-12-05T00:26:44,990 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@77448529 2024-12-05T00:26:44,990 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-05T00:26:44,991 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47934, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-05T00:26:44,991 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38279 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-05T00:26:44,991 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38279 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-05T00:26:44,991 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38279 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T00:26:44,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38279 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-05T00:26:44,994 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T00:26:44,994 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:26:44,994 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38279 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-12-05T00:26:44,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38279 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T00:26:44,995 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T00:26:45,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741835_1011 (size=405) 2024-12-05T00:26:45,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35063 is added to blk_1073741835_1011 (size=405) 2024-12-05T00:26:45,005 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 9da6c76fa6cf4f1b43cdce02d6177165, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1 2024-12-05T00:26:45,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35063 is added to blk_1073741836_1012 (size=88) 2024-12-05T00:26:45,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741836_1012 (size=88) 2024-12-05T00:26:45,011 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:26:45,012 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 9da6c76fa6cf4f1b43cdce02d6177165, disabling compactions & flushes 2024-12-05T00:26:45,012 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165. 2024-12-05T00:26:45,012 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165. 2024-12-05T00:26:45,012 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165. after waiting 0 ms 2024-12-05T00:26:45,012 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165. 
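The TableDescriptorChecker warnings a few entries back expose two deliberately small sizes used by this test: hbase.hregion.max.filesize=786432 and a memstore flush size of 8192. Those values line up with the split-policy numbers logged when 1588230740 was opened (initialSize=16384, desiredMaxFileSize=765592 and then 797625 under two different jitterRate values). The sketch below shows the relations the numbers are consistent with; treating initialSize as twice the flush size and the jitter as a truncated additive term is an inference from the logged values, not something the log states:

    public class SplitPolicySizesSketch {
        // Jittered split threshold consistent with both "Opened 1588230740" journal entries:
        // desiredMaxFileSize = maxFileSize + (long) (maxFileSize * jitterRate)
        static long jittered(long maxFileSize, double jitterRate) {
            return maxFileSize + (long) (maxFileSize * jitterRate);
        }

        public static void main(String[] args) {
            long maxFileSize = 786432L;     // hbase.hregion.max.filesize from the warning above
            long memstoreFlushSize = 8192L; // memstore flush size from the warning above

            System.out.println(2 * memstoreFlushSize);                       // 16384 -> initialSize
            System.out.println(jittered(maxFileSize, -0.02649986743927002)); // 765592 (first open)
            System.out.println(jittered(maxFileSize, 0.014233112335205078)); // 797625 (open on the RS)
        }
    }

Both jittered values reproduce the logged desiredMaxFileSize figures exactly.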
2024-12-05T00:26:45,012 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165. 2024-12-05T00:26:45,012 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 9da6c76fa6cf4f1b43cdce02d6177165: Waiting for close lock at 1733358405012Disabling compacts and flushes for region at 1733358405012Disabling writes for close at 1733358405012Writing region close event to WAL at 1733358405012Closed at 1733358405012 2024-12-05T00:26:45,013 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T00:26:45,014 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1733358405014"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733358405014"}]},"ts":"1733358405014"} 2024-12-05T00:26:45,017 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-05T00:26:45,018 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T00:26:45,018 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733358405018"}]},"ts":"1733358405018"} 2024-12-05T00:26:45,021 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-12-05T00:26:45,021 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=9da6c76fa6cf4f1b43cdce02d6177165, ASSIGN}] 2024-12-05T00:26:45,023 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=9da6c76fa6cf4f1b43cdce02d6177165, ASSIGN 2024-12-05T00:26:45,024 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=9da6c76fa6cf4f1b43cdce02d6177165, ASSIGN; state=OFFLINE, location=2113c16e5528,38997,1733358404160; forceNewPlan=false, retain=false 2024-12-05T00:26:45,175 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9da6c76fa6cf4f1b43cdce02d6177165, regionState=OPENING, regionLocation=2113c16e5528,38997,1733358404160 2024-12-05T00:26:45,177 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=9da6c76fa6cf4f1b43cdce02d6177165, ASSIGN because future has completed
2024-12-05T00:26:45,178 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9da6c76fa6cf4f1b43cdce02d6177165, server=2113c16e5528,38997,1733358404160}]
2024-12-05T00:26:45,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-05T00:26:45,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-05T00:26:45,334 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165.
2024-12-05T00:26:45,334 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 9da6c76fa6cf4f1b43cdce02d6177165, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165.', STARTKEY => '', ENDKEY => ''}
2024-12-05T00:26:45,334 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 9da6c76fa6cf4f1b43cdce02d6177165
2024-12-05T00:26:45,335 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-05T00:26:45,335 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 9da6c76fa6cf4f1b43cdce02d6177165
2024-12-05T00:26:45,335 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 9da6c76fa6cf4f1b43cdce02d6177165
2024-12-05T00:26:45,336 INFO [StoreOpener-9da6c76fa6cf4f1b43cdce02d6177165-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 9da6c76fa6cf4f1b43cdce02d6177165
2024-12-05T00:26:45,337 INFO [StoreOpener-9da6c76fa6cf4f1b43cdce02d6177165-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9da6c76fa6cf4f1b43cdce02d6177165 columnFamilyName info
2024-12-05T00:26:45,337 DEBUG [StoreOpener-9da6c76fa6cf4f1b43cdce02d6177165-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-05T00:26:45,338 INFO [StoreOpener-9da6c76fa6cf4f1b43cdce02d6177165-1 {}] regionserver.HStore(327): Store=9da6c76fa6cf4f1b43cdce02d6177165/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-05T00:26:45,338 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 9da6c76fa6cf4f1b43cdce02d6177165
2024-12-05T00:26:45,338 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165
2024-12-05T00:26:45,339 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165
2024-12-05T00:26:45,339 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 9da6c76fa6cf4f1b43cdce02d6177165
2024-12-05T00:26:45,339 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 9da6c76fa6cf4f1b43cdce02d6177165
2024-12-05T00:26:45,341 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 9da6c76fa6cf4f1b43cdce02d6177165
2024-12-05T00:26:45,342 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-05T00:26:45,343 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 9da6c76fa6cf4f1b43cdce02d6177165; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=811840, jitterRate=0.03230859339237213}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-05T00:26:45,343 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9da6c76fa6cf4f1b43cdce02d6177165
2024-12-05T00:26:45,343 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 9da6c76fa6cf4f1b43cdce02d6177165: Running coprocessor pre-open hook at 1733358405335Writing region info on filesystem at 1733358405335Initializing all the Stores at 1733358405335Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358405335Cleaning up temporary data from old regions at 1733358405339 (+4 ms)Running coprocessor post-open hooks at 1733358405343 (+4 ms)Region opened successfully at 1733358405343
2024-12-05T00:26:45,344 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165., pid=6, masterSystemTime=1733358405330
2024-12-05T00:26:45,347 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165.
2024-12-05T00:26:45,347 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165.
2024-12-05T00:26:45,348 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9da6c76fa6cf4f1b43cdce02d6177165, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,38997,1733358404160
2024-12-05T00:26:45,350 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9da6c76fa6cf4f1b43cdce02d6177165, server=2113c16e5528,38997,1733358404160 because future has completed
2024-12-05T00:26:45,354 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5
2024-12-05T00:26:45,354 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 9da6c76fa6cf4f1b43cdce02d6177165, server=2113c16e5528,38997,1733358404160 in 173 msec
2024-12-05T00:26:45,356 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4
2024-12-05T00:26:45,357 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=9da6c76fa6cf4f1b43cdce02d6177165, ASSIGN in 333 msec
2024-12-05T00:26:45,357 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-05T00:26:45,358 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733358405357"}]},"ts":"1733358405357"}
2024-12-05T00:26:45,359 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta
2024-12-05T00:26:45,361 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION
2024-12-05T00:26:45,362 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 370 msec
2024-12-05T00:26:46,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:46,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:47,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:47,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:48,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:48,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:49,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:49,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:50,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:50,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:50,246 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-05T00:26:50,247 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:50,247 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:50,247 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:50,247 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:50,247 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:50,247 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:50,264 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:50,264 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:50,265 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:50,265 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:50,265 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:50,265 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:50,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:50,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:50,269 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:50,271 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:26:50,403 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-05T00:26:50,404 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-12-05T00:26:51,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:51,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:52,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:52,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:53,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:53,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:54,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:54,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-05T00:26:54,743 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-05T00:26:54,743 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-12-05T00:26:54,744 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-05T00:26:54,744 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-12-05T00:26:54,744 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-05T00:26:54,744 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer
2024-12-05T00:26:55,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38279 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-05T00:26:55,032 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-05T00:26:55,032 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100
2024-12-05T00:26:55,035 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-05T00:26:55,035 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165.
2024-12-05T00:26:55,038 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165., hostname=2113c16e5528,38997,1733358404160, seqNum=2] 2024-12-05T00:26:55,044 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38279 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-05T00:26:55,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38279 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-05T00:26:55,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38279 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T00:26:55,051 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-05T00:26:55,052 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-05T00:26:55,054 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-05T00:26:55,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:55,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-05T00:26:55,215 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38997 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-05T00:26:55,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165. 2024-12-05T00:26:55,216 INFO [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 9da6c76fa6cf4f1b43cdce02d6177165 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-05T00:26:55,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/.tmp/info/8e3fe81af6a147e9adeaf51f0a084fc5 is 1080, key is row0001/info:/1733358415039/Put/seqid=0 2024-12-05T00:26:55,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35063 is added to blk_1073741837_1013 (size=6033) 2024-12-05T00:26:55,238 INFO [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/.tmp/info/8e3fe81af6a147e9adeaf51f0a084fc5 2024-12-05T00:26:55,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741837_1013 (size=6033) 2024-12-05T00:26:55,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/.tmp/info/8e3fe81af6a147e9adeaf51f0a084fc5 as hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/info/8e3fe81af6a147e9adeaf51f0a084fc5 2024-12-05T00:26:55,251 INFO [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/info/8e3fe81af6a147e9adeaf51f0a084fc5, entries=1, sequenceid=5, filesize=5.9 K 2024-12-05T00:26:55,252 INFO [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 9da6c76fa6cf4f1b43cdce02d6177165 in 36ms, sequenceid=5, compaction requested=false 2024-12-05T00:26:55,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush 
status journal for 9da6c76fa6cf4f1b43cdce02d6177165: 2024-12-05T00:26:55,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165. 2024-12-05T00:26:55,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-05T00:26:55,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38279 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-05T00:26:55,260 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-05T00:26:55,261 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 204 msec 2024-12-05T00:26:55,263 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 215 msec 2024-12-05T00:26:56,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:56,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:57,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:57,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:58,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:58,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:59,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:26:59,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:00,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:00,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:01,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:01,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:02,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:02,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:03,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:03,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:04,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:04,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:05,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38279 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T00:27:05,132 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-05T00:27:05,135 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38279 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-05T00:27:05,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38279 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-05T00:27:05,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38279 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-12-05T00:27:05,137 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-05T00:27:05,138 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-05T00:27:05,138 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, 
hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-05T00:27:05,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:05,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:05,291 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38997 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-12-05T00:27:05,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165. 2024-12-05T00:27:05,292 INFO [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 9da6c76fa6cf4f1b43cdce02d6177165 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-05T00:27:05,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/.tmp/info/7f1fd359923d444c890c303e043130ea is 1080, key is row0002/info:/1733358425133/Put/seqid=0 2024-12-05T00:27:05,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741838_1014 (size=6033) 2024-12-05T00:27:05,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35063 is added to blk_1073741838_1014 (size=6033) 2024-12-05T00:27:05,303 INFO [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/.tmp/info/7f1fd359923d444c890c303e043130ea 2024-12-05T00:27:05,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/.tmp/info/7f1fd359923d444c890c303e043130ea as 
hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/info/7f1fd359923d444c890c303e043130ea 2024-12-05T00:27:05,314 INFO [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/info/7f1fd359923d444c890c303e043130ea, entries=1, sequenceid=9, filesize=5.9 K 2024-12-05T00:27:05,315 INFO [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 9da6c76fa6cf4f1b43cdce02d6177165 in 23ms, sequenceid=9, compaction requested=false 2024-12-05T00:27:05,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 9da6c76fa6cf4f1b43cdce02d6177165: 2024-12-05T00:27:05,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165. 2024-12-05T00:27:05,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-12-05T00:27:05,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38279 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-12-05T00:27:05,319 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-12-05T00:27:05,319 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 179 msec 2024-12-05T00:27:05,321 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 185 msec 2024-12-05T00:27:06,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:06,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:07,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:07,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:08,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:08,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:09,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:09,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:10,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:10,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:11,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:11,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 after 68045ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor205.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:27:11,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:11,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta after 68031ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor205.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:27:12,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-05T00:27:12,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:13,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:13,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-05T00:27:14,099 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-05T00:27:14,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:14,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:15,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38279 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-12-05T00:27:15,201 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-05T00:27:15,204 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C38997%2C1733358404160.1733358435204 2024-12-05T00:27:15,210 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:15,210 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:15,211 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:15,211 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:15,211 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:15,211 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/WALs/2113c16e5528,38997,1733358404160/2113c16e5528%2C38997%2C1733358404160.1733358404542 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/WALs/2113c16e5528,38997,1733358404160/2113c16e5528%2C38997%2C1733358404160.1733358435204 2024-12-05T00:27:15,212 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41715:41715),(127.0.0.1/127.0.0.1:38897:38897)] 2024-12-05T00:27:15,212 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/WALs/2113c16e5528,38997,1733358404160/2113c16e5528%2C38997%2C1733358404160.1733358404542 is not closed yet, will 
try archiving it next time 2024-12-05T00:27:15,212 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38279 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-05T00:27:15,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741833_1009 (size=5546) 2024-12-05T00:27:15,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35063 is added to blk_1073741833_1009 (size=5546) 2024-12-05T00:27:15,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38279 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-05T00:27:15,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38279 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-12-05T00:27:15,215 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-05T00:27:15,216 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-05T00:27:15,216 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-05T00:27:15,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-05T00:27:15,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
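The pair of traces above records the same failure once per WAL file: RecoverLeaseFSUtils looks up DistributedFileSystem.isFileClosed reflectively, and the invocation fails because the DFSClient underneath has already been shut down ("Filesystem closed"). A minimal, hypothetical sketch of that reflective probe pattern follows; the class and method names are illustrative only and are not the actual RecoverLeaseFSUtils code.

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch only: probes isFileClosed(Path) via reflection, the call
// pattern visible in the trace above (Method.invoke -> DistributedFileSystem.isFileClosed).
public final class IsFileClosedProbe {

  /** Returns true only when the filesystem positively reports the file as closed. */
  static boolean probeIsFileClosed(FileSystem fs, Path path) {
    try {
      // Plain FileSystem has no isFileClosed; DistributedFileSystem does, so look it up reflectively.
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      return false; // filesystem implementation without an isFileClosed method
    } catch (IllegalAccessException e) {
      return false;
    } catch (InvocationTargetException e) {
      // The wrapped cause in the log above is java.io.IOException: Filesystem closed.
      return false;
    }
  }
}
```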
2024-12-05T00:27:15,369 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38997 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-12-05T00:27:15,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165.
2024-12-05T00:27:15,370 INFO [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 9da6c76fa6cf4f1b43cdce02d6177165 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-05T00:27:15,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/.tmp/info/ecc49a4622b24e3891fb12945d6a9bf8 is 1080, key is row0003/info:/1733358435202/Put/seqid=0
2024-12-05T00:27:15,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35063 is added to blk_1073741840_1016 (size=6033)
2024-12-05T00:27:15,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741840_1016 (size=6033)
2024-12-05T00:27:15,379 INFO [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/.tmp/info/ecc49a4622b24e3891fb12945d6a9bf8
2024-12-05T00:27:15,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/.tmp/info/ecc49a4622b24e3891fb12945d6a9bf8 as hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/info/ecc49a4622b24e3891fb12945d6a9bf8
2024-12-05T00:27:15,390 INFO [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/info/ecc49a4622b24e3891fb12945d6a9bf8, entries=1, sequenceid=13, filesize=5.9 K
2024-12-05T00:27:15,391 INFO [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 9da6c76fa6cf4f1b43cdce02d6177165 in 21ms, sequenceid=13, compaction requested=true
2024-12-05T00:27:15,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 9da6c76fa6cf4f1b43cdce02d6177165:
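The flush above follows the usual write-then-rename pattern: the new HFile is written under the region's .tmp directory and only afterwards committed into the info store directory, so readers never observe a half-written file. A small, hypothetical sketch of that commit step (names are illustrative, not HBase's HRegionFileSystem API):

```java
import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch of the commit step: move a fully written temporary file
// into the final store directory with a single rename.
public final class TmpThenRenameCommit {

  static Path commit(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
    Path dst = new Path(storeDir, tmpFile.getName());
    // On HDFS a rename within the same namespace is atomic from the reader's point of view.
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + dst);
    }
    return dst;
  }
}
```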
2024-12-05T00:27:15,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165.
2024-12-05T00:27:15,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-12-05T00:27:15,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38279 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-12-05T00:27:15,396 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-12-05T00:27:15,396 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 177 msec
2024-12-05T00:27:15,398 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 185 msec
2024-12-05T00:27:16,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-05T00:27:16,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null
2024-12-05T00:27:17,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null
2024-12-05T00:27:17,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null
2024-12-05T00:27:18,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null
2024-12-05T00:27:18,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null
2024-12-05T00:27:19,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null
2024-12-05T00:27:19,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null
2024-12-05T00:27:20,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null
2024-12-05T00:27:20,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null
2024-12-05T00:27:21,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null
2024-12-05T00:27:21,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null
2024-12-05T00:27:22,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null
2024-12-05T00:27:22,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null
2024-12-05T00:27:23,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null
2024-12-05T00:27:23,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null
2024-12-05T00:27:24,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null
2024-12-05T00:27:24,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-05T00:27:24,982 INFO [master/2113c16e5528:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-05T00:27:24,982 INFO [master/2113c16e5528:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-12-05T00:27:25,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38279 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-12-05T00:27:25,222 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-05T00:27:25,222 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-05T00:27:25,223 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-05T00:27:25,223 DEBUG [Time-limited test {}] regionserver.HStore(1541): 9da6c76fa6cf4f1b43cdce02d6177165/info is initiating minor compaction (all files)
2024-12-05T00:27:25,223 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-05T00:27:25,223 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
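The RecoverLeaseFSUtils WARN entries above repeat roughly once per second per WAL file, which is the signature of a polling retry loop around the isFileClosed probe. A simplified, hypothetical sketch of such a loop is shown below; the real lease-recovery logic has more states (recoverLease calls, timeouts, backoff) than this illustration.

```java
import java.io.IOException;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Illustrative sketch of a once-per-second poll on isFileClosed, the cadence
// visible in the repeated WARN entries above.
public final class LeaseRecoveryPoll {

  static boolean waitUntilClosed(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        if (dfs.isFileClosed(wal)) {
          return true;
        }
      } catch (IOException e) {
        // e.g. "Filesystem closed" when the client was shut down underneath the poller
      }
      TimeUnit.SECONDS.sleep(1);
    }
    return false;
  }
}
```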
2024-12-05T00:27:25,223 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 9da6c76fa6cf4f1b43cdce02d6177165/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165.
2024-12-05T00:27:25,224 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/info/8e3fe81af6a147e9adeaf51f0a084fc5, hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/info/7f1fd359923d444c890c303e043130ea, hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/info/ecc49a4622b24e3891fb12945d6a9bf8] into tmpdir=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/.tmp, totalSize=17.7 K
2024-12-05T00:27:25,224 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 8e3fe81af6a147e9adeaf51f0a084fc5, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733358415039
2024-12-05T00:27:25,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-05T00:27:25,224 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 7f1fd359923d444c890c303e043130ea, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1733358425133
2024-12-05T00:27:25,225 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting ecc49a4622b24e3891fb12945d6a9bf8, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733358435202
2024-12-05T00:27:25,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-05T00:27:25,235 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 9da6c76fa6cf4f1b43cdce02d6177165#info#compaction#44 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
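The throughput controller entry above reports the configured bounds (50.00 to 100.00 MB/second) and that no sleeps were needed for this small compaction. The underlying idea is simple rate limiting: sleep the compaction writer whenever its running rate exceeds the current bound. A generic, hypothetical sketch of that idea follows (not HBase's PressureAwareCompactionThroughputController).

```java
import java.util.concurrent.TimeUnit;

// Generic rate limiter sketch: after each chunk of written bytes, sleep just long
// enough to keep the running throughput at or below the configured bound.
public final class SimpleThroughputLimiter {

  private final double maxBytesPerSecond;
  private final long start = System.nanoTime();
  private long bytesWritten;

  SimpleThroughputLimiter(double maxBytesPerSecond) {
    this.maxBytesPerSecond = maxBytesPerSecond;
  }

  void control(long bytesJustWritten) throws InterruptedException {
    bytesWritten += bytesJustWritten;
    double elapsedSec = (System.nanoTime() - start) / 1e9;
    double minimumSec = bytesWritten / maxBytesPerSecond;
    if (minimumSec > elapsedSec) {
      TimeUnit.MILLISECONDS.sleep((long) ((minimumSec - elapsedSec) * 1000));
    }
  }
}
```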
0 active operations remaining, total limit is 50.00 MB/second 2024-12-05T00:27:25,236 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/.tmp/info/3ddad75543a24a45bea89836731e2a38 is 1080, key is row0001/info:/1733358415039/Put/seqid=0 2024-12-05T00:27:25,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35063 is added to blk_1073741841_1017 (size=8296) 2024-12-05T00:27:25,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741841_1017 (size=8296) 2024-12-05T00:27:25,249 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/.tmp/info/3ddad75543a24a45bea89836731e2a38 as hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/info/3ddad75543a24a45bea89836731e2a38 2024-12-05T00:27:25,256 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 9da6c76fa6cf4f1b43cdce02d6177165/info of 9da6c76fa6cf4f1b43cdce02d6177165 into 3ddad75543a24a45bea89836731e2a38(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-05T00:27:25,256 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 9da6c76fa6cf4f1b43cdce02d6177165: 2024-12-05T00:27:25,259 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C38997%2C1733358404160.1733358445258 2024-12-05T00:27:25,264 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:25,264 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:25,264 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:25,264 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:25,264 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:25,264 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/WALs/2113c16e5528,38997,1733358404160/2113c16e5528%2C38997%2C1733358404160.1733358435204 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/WALs/2113c16e5528,38997,1733358404160/2113c16e5528%2C38997%2C1733358404160.1733358445258 2024-12-05T00:27:25,266 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41715:41715),(127.0.0.1/127.0.0.1:38897:38897)] 2024-12-05T00:27:25,266 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/WALs/2113c16e5528,38997,1733358404160/2113c16e5528%2C38997%2C1733358404160.1733358435204 is not closed yet, will try archiving it next time 2024-12-05T00:27:25,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35063 is added to blk_1073741839_1015 (size=2520) 2024-12-05T00:27:25,266 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741839_1015 (size=2520) 2024-12-05T00:27:25,268 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/WALs/2113c16e5528,38997,1733358404160/2113c16e5528%2C38997%2C1733358404160.1733358404542 to hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/oldWALs/2113c16e5528%2C38997%2C1733358404160.1733358404542 2024-12-05T00:27:25,268 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38279 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-05T00:27:25,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38279 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-05T00:27:25,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38279 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-05T00:27:25,271 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-05T00:27:25,272 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-05T00:27:25,272 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-05T00:27:25,425 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38997 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-12-05T00:27:25,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165. 
2024-12-05T00:27:25,425 INFO [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 9da6c76fa6cf4f1b43cdce02d6177165 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-05T00:27:25,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/.tmp/info/f47507ea6c1b4b788944b55d42d28d84 is 1080, key is row0000/info:/1733358445257/Put/seqid=0 2024-12-05T00:27:25,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35063 is added to blk_1073741843_1019 (size=6033) 2024-12-05T00:27:25,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741843_1019 (size=6033) 2024-12-05T00:27:25,436 INFO [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/.tmp/info/f47507ea6c1b4b788944b55d42d28d84 2024-12-05T00:27:25,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/.tmp/info/f47507ea6c1b4b788944b55d42d28d84 as hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/info/f47507ea6c1b4b788944b55d42d28d84 2024-12-05T00:27:25,446 INFO [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/info/f47507ea6c1b4b788944b55d42d28d84, entries=1, sequenceid=18, filesize=5.9 K 2024-12-05T00:27:25,447 INFO [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 9da6c76fa6cf4f1b43cdce02d6177165 in 22ms, sequenceid=18, compaction requested=false 2024-12-05T00:27:25,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 9da6c76fa6cf4f1b43cdce02d6177165: 2024-12-05T00:27:25,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165. 
2024-12-05T00:27:25,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-12-05T00:27:25,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38279 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-12-05T00:27:25,451 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-12-05T00:27:25,451 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 177 msec 2024-12-05T00:27:25,454 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 184 msec 2024-12-05T00:27:26,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-05T00:27:30,335 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 9da6c76fa6cf4f1b43cdce02d6177165, had cached 0 bytes from a total of 14329 2024-12-05T00:27:35,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-12-05T00:27:35,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38279 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-05T00:27:35,292 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-05T00:27:35,294 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C38997%2C1733358404160.1733358455294 2024-12-05T00:27:35,300 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:35,300 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:35,300 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:35,300 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:35,300 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:35,301 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/WALs/2113c16e5528,38997,1733358404160/2113c16e5528%2C38997%2C1733358404160.1733358445258 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/WALs/2113c16e5528,38997,1733358404160/2113c16e5528%2C38997%2C1733358404160.1733358455294 2024-12-05T00:27:35,301 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41715:41715),(127.0.0.1/127.0.0.1:38897:38897)] 2024-12-05T00:27:35,301 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/WALs/2113c16e5528,38997,1733358404160/2113c16e5528%2C38997%2C1733358404160.1733358445258 is not closed yet, will try archiving it next time 2024-12-05T00:27:35,301 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-05T00:27:35,301 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/WALs/2113c16e5528,38997,1733358404160/2113c16e5528%2C38997%2C1733358404160.1733358435204 to hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/oldWALs/2113c16e5528%2C38997%2C1733358404160.1733358435204 2024-12-05T00:27:35,302 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-05T00:27:35,302 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:27:35,302 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:27:35,302 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:27:35,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35063 is added to blk_1073741842_1018 (size=2026) 2024-12-05T00:27:35,302 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): 
Shutting down HBase Cluster 2024-12-05T00:27:35,302 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T00:27:35,302 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=9164491, stopped=false 2024-12-05T00:27:35,302 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=2113c16e5528,38279,1733358404115 2024-12-05T00:27:35,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741842_1018 (size=2026) 2024-12-05T00:27:35,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38997-0x10180042c950001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T00:27:35,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T00:27:35,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:27:35,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38997-0x10180042c950001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:27:35,304 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T00:27:35,304 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-05T00:27:35,304 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:27:35,304 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:27:35,305 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '2113c16e5528,38997,1733358404160' ***** 2024-12-05T00:27:35,305 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T00:27:35,305 INFO [RS:0;2113c16e5528:38997 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T00:27:35,305 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38997-0x10180042c950001, quorum=127.0.0.1:51612, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:27:35,305 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T00:27:35,305 INFO [RS:0;2113c16e5528:38997 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T00:27:35,305 INFO [RS:0;2113c16e5528:38997 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T00:27:35,305 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:27:35,305 INFO [RS:0;2113c16e5528:38997 {}] regionserver.HRegionServer(3091): Received CLOSE for 9da6c76fa6cf4f1b43cdce02d6177165 2024-12-05T00:27:35,305 INFO [RS:0;2113c16e5528:38997 {}] regionserver.HRegionServer(959): stopping server 2113c16e5528,38997,1733358404160 2024-12-05T00:27:35,306 INFO [RS:0;2113c16e5528:38997 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T00:27:35,306 INFO [RS:0;2113c16e5528:38997 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;2113c16e5528:38997. 2024-12-05T00:27:35,306 DEBUG [RS:0;2113c16e5528:38997 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:27:35,306 DEBUG [RS:0;2113c16e5528:38997 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:27:35,306 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 9da6c76fa6cf4f1b43cdce02d6177165, disabling compactions & flushes 2024-12-05T00:27:35,306 INFO [RS:0;2113c16e5528:38997 {}] 
regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T00:27:35,306 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165. 2024-12-05T00:27:35,306 INFO [RS:0;2113c16e5528:38997 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T00:27:35,306 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165. 2024-12-05T00:27:35,306 INFO [RS:0;2113c16e5528:38997 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-05T00:27:35,306 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165. after waiting 0 ms 2024-12-05T00:27:35,306 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165. 2024-12-05T00:27:35,306 INFO [RS:0;2113c16e5528:38997 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-05T00:27:35,306 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 9da6c76fa6cf4f1b43cdce02d6177165 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-05T00:27:35,306 INFO [RS:0;2113c16e5528:38997 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-05T00:27:35,306 DEBUG [RS:0;2113c16e5528:38997 {}] regionserver.HRegionServer(1325): Online Regions={9da6c76fa6cf4f1b43cdce02d6177165=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165., 1588230740=hbase:meta,,1.1588230740} 2024-12-05T00:27:35,306 DEBUG [RS:0;2113c16e5528:38997 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 9da6c76fa6cf4f1b43cdce02d6177165 2024-12-05T00:27:35,306 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T00:27:35,306 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T00:27:35,306 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T00:27:35,306 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T00:27:35,306 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T00:27:35,306 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-12-05T00:27:35,310 DEBUG 
[RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/.tmp/info/6954993b71ae4988a822110443e4957a is 1080, key is row0001/info:/1733358455293/Put/seqid=0 2024-12-05T00:27:35,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741845_1021 (size=6033) 2024-12-05T00:27:35,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35063 is added to blk_1073741845_1021 (size=6033) 2024-12-05T00:27:35,317 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/.tmp/info/6954993b71ae4988a822110443e4957a 2024-12-05T00:27:35,324 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/.tmp/info/6954993b71ae4988a822110443e4957a as hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/info/6954993b71ae4988a822110443e4957a 2024-12-05T00:27:35,324 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/hbase/meta/1588230740/.tmp/info/1295c155b78146828e62dc9966df17a5 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165./info:regioninfo/1733358405348/Put/seqid=0 2024-12-05T00:27:35,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35063 is added to blk_1073741846_1022 (size=7308) 2024-12-05T00:27:35,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741846_1022 (size=7308) 2024-12-05T00:27:35,329 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/info/6954993b71ae4988a822110443e4957a, entries=1, sequenceid=22, filesize=5.9 K 2024-12-05T00:27:35,329 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/hbase/meta/1588230740/.tmp/info/1295c155b78146828e62dc9966df17a5 2024-12-05T00:27:35,330 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] 
regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 9da6c76fa6cf4f1b43cdce02d6177165 in 24ms, sequenceid=22, compaction requested=true 2024-12-05T00:27:35,331 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/info/8e3fe81af6a147e9adeaf51f0a084fc5, hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/info/7f1fd359923d444c890c303e043130ea, hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/info/ecc49a4622b24e3891fb12945d6a9bf8] to archive 2024-12-05T00:27:35,331 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-05T00:27:35,333 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/info/8e3fe81af6a147e9adeaf51f0a084fc5 to hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/info/8e3fe81af6a147e9adeaf51f0a084fc5 2024-12-05T00:27:35,335 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/info/7f1fd359923d444c890c303e043130ea to hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/info/7f1fd359923d444c890c303e043130ea 2024-12-05T00:27:35,336 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/info/ecc49a4622b24e3891fb12945d6a9bf8 to hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/info/ecc49a4622b24e3891fb12945d6a9bf8 2024-12-05T00:27:35,336 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=2113c16e5528:38279 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-12-05T00:27:35,337 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [8e3fe81af6a147e9adeaf51f0a084fc5=6033, 7f1fd359923d444c890c303e043130ea=6033, ecc49a4622b24e3891fb12945d6a9bf8=6033] 2024-12-05T00:27:35,341 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9da6c76fa6cf4f1b43cdce02d6177165/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-12-05T00:27:35,341 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165. 2024-12-05T00:27:35,341 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 9da6c76fa6cf4f1b43cdce02d6177165: Waiting for close lock at 1733358455306Running coprocessor pre-close hooks at 1733358455306Disabling compacts and flushes for region at 1733358455306Disabling writes for close at 1733358455306Obtaining lock to block concurrent updates at 1733358455306Preparing flush snapshotting stores in 9da6c76fa6cf4f1b43cdce02d6177165 at 1733358455306Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1733358455306Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165. at 1733358455307 (+1 ms)Flushing 9da6c76fa6cf4f1b43cdce02d6177165/info: creating writer at 1733358455307Flushing 9da6c76fa6cf4f1b43cdce02d6177165/info: appending metadata at 1733358455310 (+3 ms)Flushing 9da6c76fa6cf4f1b43cdce02d6177165/info: closing flushed file at 1733358455310Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@44f5171f: reopening flushed file at 1733358455323 (+13 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 9da6c76fa6cf4f1b43cdce02d6177165 in 24ms, sequenceid=22, compaction requested=true at 1733358455330 (+7 ms)Writing region close event to WAL at 1733358455337 (+7 ms)Running coprocessor post-close hooks at 1733358455341 (+4 ms)Closed at 1733358455341 2024-12-05T00:27:35,342 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733358404991.9da6c76fa6cf4f1b43cdce02d6177165. 
2024-12-05T00:27:35,348 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/hbase/meta/1588230740/.tmp/ns/477cb6a8224f4216b36578583367abb6 is 43, key is default/ns:d/1733358404958/Put/seqid=0 2024-12-05T00:27:35,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741847_1023 (size=5153) 2024-12-05T00:27:35,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35063 is added to blk_1073741847_1023 (size=5153) 2024-12-05T00:27:35,354 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/hbase/meta/1588230740/.tmp/ns/477cb6a8224f4216b36578583367abb6 2024-12-05T00:27:35,373 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/hbase/meta/1588230740/.tmp/table/c53f0fede84e4550a572720e088b87f8 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1733358405357/Put/seqid=0 2024-12-05T00:27:35,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35063 is added to blk_1073741848_1024 (size=5508) 2024-12-05T00:27:35,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741848_1024 (size=5508) 2024-12-05T00:27:35,378 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/hbase/meta/1588230740/.tmp/table/c53f0fede84e4550a572720e088b87f8 2024-12-05T00:27:35,384 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/hbase/meta/1588230740/.tmp/info/1295c155b78146828e62dc9966df17a5 as hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/hbase/meta/1588230740/info/1295c155b78146828e62dc9966df17a5 2024-12-05T00:27:35,388 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/hbase/meta/1588230740/info/1295c155b78146828e62dc9966df17a5, entries=10, sequenceid=11, filesize=7.1 K 2024-12-05T00:27:35,389 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/hbase/meta/1588230740/.tmp/ns/477cb6a8224f4216b36578583367abb6 as hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/hbase/meta/1588230740/ns/477cb6a8224f4216b36578583367abb6 2024-12-05T00:27:35,394 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/hbase/meta/1588230740/ns/477cb6a8224f4216b36578583367abb6, entries=2, sequenceid=11, filesize=5.0 K 2024-12-05T00:27:35,394 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/hbase/meta/1588230740/.tmp/table/c53f0fede84e4550a572720e088b87f8 as hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/hbase/meta/1588230740/table/c53f0fede84e4550a572720e088b87f8 2024-12-05T00:27:35,399 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/hbase/meta/1588230740/table/c53f0fede84e4550a572720e088b87f8, entries=2, sequenceid=11, filesize=5.4 K 2024-12-05T00:27:35,400 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 94ms, sequenceid=11, compaction requested=false 2024-12-05T00:27:35,405 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-05T00:27:35,405 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T00:27:35,405 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T00:27:35,405 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733358455306Running coprocessor pre-close hooks at 1733358455306Disabling compacts and flushes for region at 1733358455306Disabling writes for close at 1733358455306Obtaining lock to block concurrent updates at 1733358455306Preparing flush snapshotting stores in 1588230740 at 1733358455306Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1733358455307 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733358455307Flushing 1588230740/info: creating writer at 1733358455307Flushing 1588230740/info: appending metadata at 1733358455324 (+17 ms)Flushing 1588230740/info: closing flushed file at 1733358455324Flushing 1588230740/ns: creating writer at 1733358455334 (+10 ms)Flushing 1588230740/ns: appending metadata at 1733358455348 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733358455348Flushing 1588230740/table: creating writer at 1733358455358 (+10 ms)Flushing 1588230740/table: appending metadata at 1733358455372 (+14 ms)Flushing 1588230740/table: closing flushed file at 1733358455372Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@51b2ceda: reopening flushed file at 1733358455383 (+11 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2020af62: reopening flushed file at 1733358455389 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@292fd574: reopening flushed file at 1733358455394 (+5 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 94ms, sequenceid=11, compaction requested=false at 1733358455400 (+6 ms)Writing region close event to WAL at 1733358455401 (+1 ms)Running coprocessor post-close hooks at 1733358455405 (+4 ms)Closed at 1733358455405 2024-12-05T00:27:35,406 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-05T00:27:35,472 INFO [regionserver/2113c16e5528:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-05T00:27:35,472 INFO [regionserver/2113c16e5528:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-05T00:27:35,506 INFO [RS:0;2113c16e5528:38997 {}] regionserver.HRegionServer(976): stopping server 2113c16e5528,38997,1733358404160; all regions closed. 2024-12-05T00:27:35,507 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:35,507 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:35,507 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:35,507 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:35,507 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:35,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741834_1010 (size=3306) 2024-12-05T00:27:35,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35063 is added to blk_1073741834_1010 (size=3306) 2024-12-05T00:27:35,512 DEBUG [RS:0;2113c16e5528:38997 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/oldWALs 2024-12-05T00:27:35,512 INFO [RS:0;2113c16e5528:38997 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2113c16e5528%2C38997%2C1733358404160.meta:.meta(num 1733358404916) 2024-12-05T00:27:35,512 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:35,512 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:35,512 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:35,512 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:35,513 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:35,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35063 is added to blk_1073741844_1020 (size=1252) 2024-12-05T00:27:35,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741844_1020 (size=1252) 2024-12-05T00:27:35,517 DEBUG [RS:0;2113c16e5528:38997 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/oldWALs 2024-12-05T00:27:35,517 INFO [RS:0;2113c16e5528:38997 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2113c16e5528%2C38997%2C1733358404160:(num 1733358455294) 2024-12-05T00:27:35,517 DEBUG [RS:0;2113c16e5528:38997 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:27:35,517 INFO [RS:0;2113c16e5528:38997 {}] regionserver.LeaseManager(133): 
Closed leases 2024-12-05T00:27:35,517 INFO [RS:0;2113c16e5528:38997 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T00:27:35,517 INFO [RS:0;2113c16e5528:38997 {}] hbase.ChoreService(370): Chore service for: regionserver/2113c16e5528:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T00:27:35,517 INFO [RS:0;2113c16e5528:38997 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T00:27:35,517 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-05T00:27:35,518 INFO [RS:0;2113c16e5528:38997 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38997 2024-12-05T00:27:35,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T00:27:35,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38997-0x10180042c950001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2113c16e5528,38997,1733358404160 2024-12-05T00:27:35,520 INFO [RS:0;2113c16e5528:38997 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T00:27:35,520 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2113c16e5528,38997,1733358404160] 2024-12-05T00:27:35,523 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2113c16e5528,38997,1733358404160 already deleted, retry=false 2024-12-05T00:27:35,523 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2113c16e5528,38997,1733358404160 expired; onlineServers=0 2024-12-05T00:27:35,523 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '2113c16e5528,38279,1733358404115' ***** 2024-12-05T00:27:35,523 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-05T00:27:35,523 INFO [M:0;2113c16e5528:38279 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T00:27:35,523 INFO [M:0;2113c16e5528:38279 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T00:27:35,524 DEBUG [M:0;2113c16e5528:38279 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-05T00:27:35,524 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-05T00:27:35,524 DEBUG [M:0;2113c16e5528:38279 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-05T00:27:35,524 DEBUG [master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.large.0-1733358404319 {}] cleaner.HFileCleaner(306): Exit Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.large.0-1733358404319,5,FailOnTimeoutGroup] 2024-12-05T00:27:35,524 DEBUG [master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.small.0-1733358404320 {}] cleaner.HFileCleaner(306): Exit Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.small.0-1733358404320,5,FailOnTimeoutGroup] 2024-12-05T00:27:35,524 INFO [M:0;2113c16e5528:38279 {}] hbase.ChoreService(370): Chore service for: master/2113c16e5528:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-05T00:27:35,524 INFO [M:0;2113c16e5528:38279 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T00:27:35,524 DEBUG [M:0;2113c16e5528:38279 {}] master.HMaster(1795): Stopping service threads 2024-12-05T00:27:35,524 INFO [M:0;2113c16e5528:38279 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-05T00:27:35,524 INFO [M:0;2113c16e5528:38279 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T00:27:35,524 INFO [M:0;2113c16e5528:38279 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-05T00:27:35,524 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-05T00:27:35,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-05T00:27:35,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:27:35,526 DEBUG [M:0;2113c16e5528:38279 {}] zookeeper.ZKUtil(347): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-05T00:27:35,526 WARN [M:0;2113c16e5528:38279 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-05T00:27:35,526 INFO [M:0;2113c16e5528:38279 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/.lastflushedseqids 2024-12-05T00:27:35,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741849_1025 (size=130) 2024-12-05T00:27:35,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35063 is added to blk_1073741849_1025 (size=130) 2024-12-05T00:27:35,531 INFO [M:0;2113c16e5528:38279 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-05T00:27:35,532 INFO [M:0;2113c16e5528:38279 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-05T00:27:35,532 DEBUG [M:0;2113c16e5528:38279 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T00:27:35,532 INFO [M:0;2113c16e5528:38279 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:27:35,532 DEBUG [M:0;2113c16e5528:38279 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:27:35,532 DEBUG [M:0;2113c16e5528:38279 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T00:27:35,532 DEBUG [M:0;2113c16e5528:38279 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:27:35,532 INFO [M:0;2113c16e5528:38279 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.54 KB heapSize=54.91 KB 2024-12-05T00:27:35,547 DEBUG [M:0;2113c16e5528:38279 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bee5f504966b496b91d3d68394d57791 is 82, key is hbase:meta,,1/info:regioninfo/1733358404939/Put/seqid=0 2024-12-05T00:27:35,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741850_1026 (size=5672) 2024-12-05T00:27:35,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35063 is added to blk_1073741850_1026 (size=5672) 2024-12-05T00:27:35,553 INFO [M:0;2113c16e5528:38279 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bee5f504966b496b91d3d68394d57791 2024-12-05T00:27:35,572 DEBUG [M:0;2113c16e5528:38279 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/33b3a1e53c104f4181160cbf3de16286 is 797, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733358405362/Put/seqid=0 2024-12-05T00:27:35,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35063 is added to blk_1073741851_1027 (size=7818) 2024-12-05T00:27:35,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741851_1027 (size=7818) 2024-12-05T00:27:35,577 INFO [M:0;2113c16e5528:38279 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.94 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/33b3a1e53c104f4181160cbf3de16286 2024-12-05T00:27:35,581 INFO [M:0;2113c16e5528:38279 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 33b3a1e53c104f4181160cbf3de16286 2024-12-05T00:27:35,595 DEBUG [M:0;2113c16e5528:38279 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/317565da5b70415a9ac1008f18f73c81 is 69, key is 2113c16e5528,38997,1733358404160/rs:state/1733358404397/Put/seqid=0 2024-12-05T00:27:35,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35063 is added to blk_1073741852_1028 (size=5156) 2024-12-05T00:27:35,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741852_1028 (size=5156) 2024-12-05T00:27:35,599 INFO [M:0;2113c16e5528:38279 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/317565da5b70415a9ac1008f18f73c81 2024-12-05T00:27:35,616 DEBUG [M:0;2113c16e5528:38279 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/658c379cd2f244c086e0170724e292e2 is 52, key is load_balancer_on/state:d/1733358404987/Put/seqid=0 2024-12-05T00:27:35,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35063 is added to blk_1073741853_1029 (size=5056) 2024-12-05T00:27:35,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741853_1029 (size=5056) 2024-12-05T00:27:35,621 INFO [M:0;2113c16e5528:38279 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/658c379cd2f244c086e0170724e292e2 2024-12-05T00:27:35,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38997-0x10180042c950001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:27:35,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38997-0x10180042c950001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:27:35,621 INFO [RS:0;2113c16e5528:38997 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T00:27:35,621 INFO [RS:0;2113c16e5528:38997 {}] regionserver.HRegionServer(1031): Exiting; stopping=2113c16e5528,38997,1733358404160; zookeeper connection closed. 
2024-12-05T00:27:35,622 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@44aac808 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@44aac808 2024-12-05T00:27:35,622 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-05T00:27:35,625 DEBUG [M:0;2113c16e5528:38279 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bee5f504966b496b91d3d68394d57791 as hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/bee5f504966b496b91d3d68394d57791 2024-12-05T00:27:35,630 INFO [M:0;2113c16e5528:38279 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/bee5f504966b496b91d3d68394d57791, entries=8, sequenceid=121, filesize=5.5 K 2024-12-05T00:27:35,630 DEBUG [M:0;2113c16e5528:38279 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/33b3a1e53c104f4181160cbf3de16286 as hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/33b3a1e53c104f4181160cbf3de16286 2024-12-05T00:27:35,635 INFO [M:0;2113c16e5528:38279 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 33b3a1e53c104f4181160cbf3de16286 2024-12-05T00:27:35,635 INFO [M:0;2113c16e5528:38279 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/33b3a1e53c104f4181160cbf3de16286, entries=14, sequenceid=121, filesize=7.6 K 2024-12-05T00:27:35,636 DEBUG [M:0;2113c16e5528:38279 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/317565da5b70415a9ac1008f18f73c81 as hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/317565da5b70415a9ac1008f18f73c81 2024-12-05T00:27:35,640 INFO [M:0;2113c16e5528:38279 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/317565da5b70415a9ac1008f18f73c81, entries=1, sequenceid=121, filesize=5.0 K 2024-12-05T00:27:35,640 DEBUG [M:0;2113c16e5528:38279 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/658c379cd2f244c086e0170724e292e2 as hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/658c379cd2f244c086e0170724e292e2 2024-12-05T00:27:35,644 INFO [M:0;2113c16e5528:38279 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34317/user/jenkins/test-data/f2fc732a-4866-7874-b084-19a701814bc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/658c379cd2f244c086e0170724e292e2, entries=1, sequenceid=121, filesize=4.9 K 2024-12-05T00:27:35,646 INFO [M:0;2113c16e5528:38279 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.54 KB/44590, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 114ms, sequenceid=121, compaction requested=false 2024-12-05T00:27:35,647 INFO [M:0;2113c16e5528:38279 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:27:35,647 DEBUG [M:0;2113c16e5528:38279 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733358455532Disabling compacts and flushes for region at 1733358455532Disabling writes for close at 1733358455532Obtaining lock to block concurrent updates at 1733358455532Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733358455532Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44590, getHeapSize=56168, getOffHeapSize=0, getCellsCount=140 at 1733358455532Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733358455533 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733358455533Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733358455547 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733358455547Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733358455557 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733358455571 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733358455571Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733358455581 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733358455594 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733358455594Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733358455603 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733358455615 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733358455615Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2eb7c3e8: reopening flushed file at 1733358455625 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@65308fcc: reopening flushed file at 1733358455630 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@e0d538: reopening flushed file at 1733358455635 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@8562e3f: reopening flushed file at 1733358455640 (+5 ms)Finished flush of dataSize ~43.54 KB/44590, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 114ms, sequenceid=121, compaction requested=false at 1733358455646 (+6 ms)Writing region close event to WAL at 1733358455647 (+1 ms)Closed at 1733358455647 2024-12-05T00:27:35,647 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:35,648 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:35,648 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:35,648 INFO [sync.3 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:35,648 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:27:35,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741830_1006 (size=52987) 2024-12-05T00:27:35,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35063 is added to blk_1073741830_1006 (size=52987) 2024-12-05T00:27:35,650 INFO [M:0;2113c16e5528:38279 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-05T00:27:35,650 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-05T00:27:35,650 INFO [M:0;2113c16e5528:38279 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38279 2024-12-05T00:27:35,651 INFO [M:0;2113c16e5528:38279 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T00:27:35,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:27:35,752 INFO [M:0;2113c16e5528:38279 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T00:27:35,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38279-0x10180042c950000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:27:35,755 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@39212263{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:27:35,755 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@10128232{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:27:35,755 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:27:35,755 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4cf5e3df{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:27:35,755 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46a495b0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/hadoop.log.dir/,STOPPED} 2024-12-05T00:27:35,757 WARN [BP-1984325545-172.17.0.2-1733358403462 heartbeating to localhost/127.0.0.1:34317 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T00:27:35,757 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T00:27:35,757 WARN [BP-1984325545-172.17.0.2-1733358403462 heartbeating to localhost/127.0.0.1:34317 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1984325545-172.17.0.2-1733358403462 (Datanode Uuid 2fb8046e-f161-4150-a543-876646e66d50) service to localhost/127.0.0.1:34317 2024-12-05T00:27:35,757 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T00:27:35,757 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/cluster_47ea64d0-2ea6-3900-1f9e-b12b87839e9f/data/data3/current/BP-1984325545-172.17.0.2-1733358403462 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:27:35,758 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/cluster_47ea64d0-2ea6-3900-1f9e-b12b87839e9f/data/data4/current/BP-1984325545-172.17.0.2-1733358403462 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:27:35,758 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T00:27:35,760 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2ac0122b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:27:35,760 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@648e3649{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:27:35,760 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:27:35,760 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5a296252{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:27:35,760 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6eebb3dc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/hadoop.log.dir/,STOPPED} 2024-12-05T00:27:35,761 WARN [BP-1984325545-172.17.0.2-1733358403462 heartbeating to localhost/127.0.0.1:34317 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T00:27:35,761 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T00:27:35,761 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T00:27:35,761 WARN [BP-1984325545-172.17.0.2-1733358403462 heartbeating to localhost/127.0.0.1:34317 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1984325545-172.17.0.2-1733358403462 (Datanode Uuid 0dfe5e03-f207-4f58-b680-945127692ab6) service to localhost/127.0.0.1:34317 2024-12-05T00:27:35,762 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/cluster_47ea64d0-2ea6-3900-1f9e-b12b87839e9f/data/data1/current/BP-1984325545-172.17.0.2-1733358403462 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:27:35,762 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/cluster_47ea64d0-2ea6-3900-1f9e-b12b87839e9f/data/data2/current/BP-1984325545-172.17.0.2-1733358403462 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:27:35,762 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T00:27:35,768 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@701842fe{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-05T00:27:35,769 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@785f0d23{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:27:35,769 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:27:35,769 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@63f38dfa{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:27:35,769 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62e6de45{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/hadoop.log.dir/,STOPPED} 2024-12-05T00:27:35,775 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-05T00:27:35,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-05T00:27:35,801 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=206 (was 179) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34317 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34317 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34317 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34317 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34317 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/2113c16e5528:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34317 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34317 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:34317 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
LeaseRenewer:jenkins@localhost:34317 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=485 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=45 (was 114), ProcessCount=11 (was 11), AvailableMemoryMB=8648 (was 8695) 2024-12-05T00:27:35,808 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=206, OpenFileDescriptor=485, MaxFileDescriptor=1048576, SystemLoadAverage=45, ProcessCount=11, AvailableMemoryMB=8649 2024-12-05T00:27:35,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-05T00:27:35,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/hadoop.log.dir so I do NOT create it in target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8 2024-12-05T00:27:35,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ea360916-0010-a7e3-14b6-cc9064d2e4cf/hadoop.tmp.dir so I do NOT create it in target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8 2024-12-05T00:27:35,809 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/cluster_938ee5fe-d9d1-29e3-faad-cab85c93ca9a, deleteOnExit=true 2024-12-05T00:27:35,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-05T00:27:35,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/test.cache.data in system properties and HBase conf 2024-12-05T00:27:35,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/hadoop.tmp.dir in system properties and HBase conf 2024-12-05T00:27:35,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/hadoop.log.dir in system properties and HBase conf 2024-12-05T00:27:35,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-05T00:27:35,809 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-05T00:27:35,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-05T00:27:35,809 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-05T00:27:35,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-05T00:27:35,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-05T00:27:35,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-05T00:27:35,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T00:27:35,810 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-05T00:27:35,810 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-05T00:27:35,810 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T00:27:35,810 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T00:27:35,810 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 
2024-12-05T00:27:35,810 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/nfs.dump.dir in system properties and HBase conf 2024-12-05T00:27:35,810 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/java.io.tmpdir in system properties and HBase conf 2024-12-05T00:27:35,810 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T00:27:35,810 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-05T00:27:35,810 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-05T00:27:35,823 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-05T00:27:35,881 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:27:35,884 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:27:35,885 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:27:35,885 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:27:35,885 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T00:27:35,886 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:27:35,886 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59703725{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:27:35,886 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@240fc28c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:27:35,999 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@188ddc10{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/java.io.tmpdir/jetty-localhost-40877-hadoop-hdfs-3_4_1-tests_jar-_-any-3090296966443603622/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-05T00:27:36,000 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5fc37f93{HTTP/1.1, (http/1.1)}{localhost:40877} 2024-12-05T00:27:36,000 INFO [Time-limited test {}] server.Server(415): Started @236627ms 2024-12-05T00:27:36,013 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-05T00:27:36,066 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:27:36,068 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:27:36,069 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:27:36,069 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:27:36,069 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T00:27:36,069 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fc50460{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:27:36,070 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d639fc0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:27:36,201 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1091e18a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/java.io.tmpdir/jetty-localhost-43605-hadoop-hdfs-3_4_1-tests_jar-_-any-7545040708312099776/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:27:36,202 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3221a4aa{HTTP/1.1, (http/1.1)}{localhost:43605} 2024-12-05T00:27:36,202 INFO [Time-limited test {}] server.Server(415): Started @236829ms 2024-12-05T00:27:36,203 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T00:27:36,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:36,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:36,232 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:27:36,235 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:27:36,237 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:27:36,237 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:27:36,237 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T00:27:36,237 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@53298b3d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:27:36,237 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@314e7370{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:27:36,307 WARN [Thread-1949 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/cluster_938ee5fe-d9d1-29e3-faad-cab85c93ca9a/data/data1/current/BP-709715538-172.17.0.2-1733358455829/current, will proceed with Du for space computation calculation, 2024-12-05T00:27:36,307 WARN [Thread-1950 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/cluster_938ee5fe-d9d1-29e3-faad-cab85c93ca9a/data/data2/current/BP-709715538-172.17.0.2-1733358455829/current, will proceed with Du for space computation calculation, 2024-12-05T00:27:36,323 WARN [Thread-1928 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-05T00:27:36,325 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb737722a5096a672 with lease ID 0x6bd31d9f76651efc: Processing first storage report for DS-cade178e-0e42-4b1f-aca7-52f686da4a4f from datanode DatanodeRegistration(127.0.0.1:44469, datanodeUuid=922058ce-75e1-4506-a65a-f8311b2cfa1d, infoPort=35401, infoSecurePort=0, ipcPort=39873, storageInfo=lv=-57;cid=testClusterID;nsid=1002086629;c=1733358455829) 2024-12-05T00:27:36,325 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb737722a5096a672 with lease ID 0x6bd31d9f76651efc: from storage DS-cade178e-0e42-4b1f-aca7-52f686da4a4f node DatanodeRegistration(127.0.0.1:44469, datanodeUuid=922058ce-75e1-4506-a65a-f8311b2cfa1d, infoPort=35401, infoSecurePort=0, ipcPort=39873, storageInfo=lv=-57;cid=testClusterID;nsid=1002086629;c=1733358455829), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:27:36,325 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb737722a5096a672 with lease ID 0x6bd31d9f76651efc: Processing first storage report for DS-e24bad35-9ce5-48be-8947-604713e7c6dc from datanode DatanodeRegistration(127.0.0.1:44469, datanodeUuid=922058ce-75e1-4506-a65a-f8311b2cfa1d, infoPort=35401, infoSecurePort=0, ipcPort=39873, storageInfo=lv=-57;cid=testClusterID;nsid=1002086629;c=1733358455829) 2024-12-05T00:27:36,325 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb737722a5096a672 with lease ID 0x6bd31d9f76651efc: from storage DS-e24bad35-9ce5-48be-8947-604713e7c6dc node DatanodeRegistration(127.0.0.1:44469, datanodeUuid=922058ce-75e1-4506-a65a-f8311b2cfa1d, infoPort=35401, infoSecurePort=0, ipcPort=39873, storageInfo=lv=-57;cid=testClusterID;nsid=1002086629;c=1733358455829), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:27:36,354 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@27cee48d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/java.io.tmpdir/jetty-localhost-41113-hadoop-hdfs-3_4_1-tests_jar-_-any-10110615517939845270/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:27:36,355 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7b29c022{HTTP/1.1, (http/1.1)}{localhost:41113} 2024-12-05T00:27:36,355 INFO [Time-limited test {}] server.Server(415): Started @236982ms 2024-12-05T00:27:36,356 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-05T00:27:36,410 INFO [regionserver/2113c16e5528:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T00:27:36,448 WARN [Thread-1975 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/cluster_938ee5fe-d9d1-29e3-faad-cab85c93ca9a/data/data3/current/BP-709715538-172.17.0.2-1733358455829/current, will proceed with Du for space computation calculation, 2024-12-05T00:27:36,448 WARN [Thread-1976 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/cluster_938ee5fe-d9d1-29e3-faad-cab85c93ca9a/data/data4/current/BP-709715538-172.17.0.2-1733358455829/current, will proceed with Du for space computation calculation, 2024-12-05T00:27:36,464 WARN [Thread-1964 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-05T00:27:36,466 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5fd7a6d2fae53890 with lease ID 0x6bd31d9f76651efd: Processing first storage report for DS-710abf9f-3cf2-481a-8a41-39ac8382e144 from datanode DatanodeRegistration(127.0.0.1:36047, datanodeUuid=68280e14-3a57-48ad-b823-905d84844bb5, infoPort=41389, infoSecurePort=0, ipcPort=35849, storageInfo=lv=-57;cid=testClusterID;nsid=1002086629;c=1733358455829) 2024-12-05T00:27:36,466 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5fd7a6d2fae53890 with lease ID 0x6bd31d9f76651efd: from storage DS-710abf9f-3cf2-481a-8a41-39ac8382e144 node DatanodeRegistration(127.0.0.1:36047, datanodeUuid=68280e14-3a57-48ad-b823-905d84844bb5, infoPort=41389, infoSecurePort=0, ipcPort=35849, storageInfo=lv=-57;cid=testClusterID;nsid=1002086629;c=1733358455829), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:27:36,466 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5fd7a6d2fae53890 with lease ID 0x6bd31d9f76651efd: Processing first storage report for DS-f291e68c-4d29-4584-8e3f-c7deeb662579 from datanode DatanodeRegistration(127.0.0.1:36047, datanodeUuid=68280e14-3a57-48ad-b823-905d84844bb5, infoPort=41389, infoSecurePort=0, ipcPort=35849, storageInfo=lv=-57;cid=testClusterID;nsid=1002086629;c=1733358455829) 2024-12-05T00:27:36,466 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5fd7a6d2fae53890 with lease ID 0x6bd31d9f76651efd: from storage DS-f291e68c-4d29-4584-8e3f-c7deeb662579 node DatanodeRegistration(127.0.0.1:36047, datanodeUuid=68280e14-3a57-48ad-b823-905d84844bb5, infoPort=41389, infoSecurePort=0, ipcPort=35849, storageInfo=lv=-57;cid=testClusterID;nsid=1002086629;c=1733358455829), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:27:36,476 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8 2024-12-05T00:27:36,479 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, 
dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/cluster_938ee5fe-d9d1-29e3-faad-cab85c93ca9a/zookeeper_0, clientPort=58134, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/cluster_938ee5fe-d9d1-29e3-faad-cab85c93ca9a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/cluster_938ee5fe-d9d1-29e3-faad-cab85c93ca9a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-05T00:27:36,479 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58134 2024-12-05T00:27:36,480 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:27:36,481 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:27:36,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741825_1001 (size=7) 2024-12-05T00:27:36,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741825_1001 (size=7) 2024-12-05T00:27:36,490 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30 with version=8 2024-12-05T00:27:36,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/hbase-staging 2024-12-05T00:27:36,491 INFO [Time-limited test {}] client.ConnectionUtils(128): master/2113c16e5528:0 server-side Connection retries=45 2024-12-05T00:27:36,492 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:27:36,492 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T00:27:36,492 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T00:27:36,492 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:27:36,492 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T00:27:36,492 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, 
hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-05T00:27:36,492 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T00:27:36,493 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38761 2024-12-05T00:27:36,494 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38761 connecting to ZooKeeper ensemble=127.0.0.1:58134 2024-12-05T00:27:36,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:387610x0, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T00:27:36,500 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38761-0x1018004f92e0000 connected 2024-12-05T00:27:36,519 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:27:36,520 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:27:36,522 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:27:36,522 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30, hbase.cluster.distributed=false 2024-12-05T00:27:36,523 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T00:27:36,524 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38761 2024-12-05T00:27:36,524 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38761 2024-12-05T00:27:36,524 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38761 2024-12-05T00:27:36,525 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38761 2024-12-05T00:27:36,525 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38761 2024-12-05T00:27:36,540 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2113c16e5528:0 server-side Connection retries=45 2024-12-05T00:27:36,540 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:27:36,540 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T00:27:36,540 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 
readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T00:27:36,540 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:27:36,540 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T00:27:36,540 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T00:27:36,540 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T00:27:36,541 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45497 2024-12-05T00:27:36,542 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45497 connecting to ZooKeeper ensemble=127.0.0.1:58134 2024-12-05T00:27:36,542 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:27:36,544 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:27:36,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:454970x0, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T00:27:36,549 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45497-0x1018004f92e0001 connected 2024-12-05T00:27:36,549 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45497-0x1018004f92e0001, quorum=127.0.0.1:58134, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:27:36,549 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-05T00:27:36,550 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T00:27:36,550 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45497-0x1018004f92e0001, quorum=127.0.0.1:58134, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T00:27:36,551 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45497-0x1018004f92e0001, quorum=127.0.0.1:58134, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T00:27:36,551 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45497 2024-12-05T00:27:36,552 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45497 2024-12-05T00:27:36,552 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45497 2024-12-05T00:27:36,552 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45497 2024-12-05T00:27:36,552 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45497 2024-12-05T00:27:36,564 DEBUG [M:0;2113c16e5528:38761 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;2113c16e5528:38761 2024-12-05T00:27:36,564 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/2113c16e5528,38761,1733358456491 2024-12-05T00:27:36,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45497-0x1018004f92e0001, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:27:36,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:27:36,566 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/2113c16e5528,38761,1733358456491 2024-12-05T00:27:36,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45497-0x1018004f92e0001, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T00:27:36,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:27:36,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45497-0x1018004f92e0001, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:27:36,570 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-05T00:27:36,570 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/2113c16e5528,38761,1733358456491 from backup master directory 2024-12-05T00:27:36,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/2113c16e5528,38761,1733358456491 2024-12-05T00:27:36,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45497-0x1018004f92e0001, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:27:36,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:27:36,571 WARN [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by 
start scripts (Longer MTTR!) 2024-12-05T00:27:36,571 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=2113c16e5528,38761,1733358456491 2024-12-05T00:27:36,575 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/hbase.id] with ID: 52b36340-9c5e-4752-916e-54e40f8c552b 2024-12-05T00:27:36,575 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/.tmp/hbase.id 2024-12-05T00:27:36,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741826_1002 (size=42) 2024-12-05T00:27:36,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741826_1002 (size=42) 2024-12-05T00:27:36,581 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/.tmp/hbase.id]:[hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/hbase.id] 2024-12-05T00:27:36,591 INFO [master/2113c16e5528:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:27:36,591 INFO [master/2113c16e5528:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-05T00:27:36,592 INFO [master/2113c16e5528:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
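The records above trace the standard mini-cluster bring-up path: HBaseTestingUtil sets hbase.rootdir, MiniZooKeeperCluster starts on an ephemeral client port, the RPC executors and NettyRpcServer are created, and the master registers itself as active and writes the cluster ID file. A minimal sketch of driving that same bring-up from a test follows; it assumes the branch-3 HBaseTestingUtil API named in the log, and the configuration key/value shown is an illustrative assumption, not read from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumption: the handlerCount=3 seen in the RpcExecutor lines is typically
    // set via this key in test configs; not confirmed for this particular run.
    conf.setInt("hbase.regionserver.handler.count", 3);

    HBaseTestingUtil util = new HBaseTestingUtil(conf);
    // Starts an in-process HDFS, a MiniZooKeeperCluster and one master plus one
    // region server -- the same components whose startup is logged above.
    util.startMiniCluster();
    try {
      // ... test body ...
    } finally {
      util.shutdownMiniCluster();
    }
  }
}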
2024-12-05T00:27:36,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:27:36,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45497-0x1018004f92e0001, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:27:36,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741827_1003 (size=196) 2024-12-05T00:27:36,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741827_1003 (size=196) 2024-12-05T00:27:36,600 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T00:27:36,601 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-05T00:27:36,601 INFO [master/2113c16e5528:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T00:27:36,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741828_1004 (size=1189) 2024-12-05T00:27:36,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741828_1004 (size=1189) 2024-12-05T00:27:36,608 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/MasterData/data/master/store 2024-12-05T00:27:36,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741829_1005 (size=34) 2024-12-05T00:27:36,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741829_1005 (size=34) 2024-12-05T00:27:36,617 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:27:36,617 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T00:27:36,617 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:27:36,617 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:27:36,617 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T00:27:36,617 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:27:36,617 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
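The master:store descriptor printed above (families 'info', 'proc', 'rs', 'state') is built internally by the master, not by user code, but its attributes map directly onto the public descriptor builders. A sketch of the 'info' family only, using the standard client API with the values copied from the log:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  // Illustration only: 'master:store' is an internal table; the name is used
  // here just to mirror the descriptor logged above.
  static TableDescriptor masterStoreLike() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                   // VERSIONS => '3'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
        .setBloomFilterType(BloomType.ROWCOL)                // BLOOMFILTER => 'ROWCOL'
        .setInMemory(true)                                   // IN_MEMORY => 'true'
        .setBlocksize(8192)                                  // BLOCKSIZE => 8 KB
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info)
        .build();
  }
}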
2024-12-05T00:27:36,617 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733358456617Disabling compacts and flushes for region at 1733358456617Disabling writes for close at 1733358456617Writing region close event to WAL at 1733358456617Closed at 1733358456617 2024-12-05T00:27:36,618 WARN [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/MasterData/data/master/store/.initializing 2024-12-05T00:27:36,618 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/MasterData/WALs/2113c16e5528,38761,1733358456491 2024-12-05T00:27:36,620 INFO [master/2113c16e5528:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2113c16e5528%2C38761%2C1733358456491, suffix=, logDir=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/MasterData/WALs/2113c16e5528,38761,1733358456491, archiveDir=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/MasterData/oldWALs, maxLogs=10 2024-12-05T00:27:36,620 INFO [master/2113c16e5528:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C38761%2C1733358456491.1733358456620 2024-12-05T00:27:36,625 INFO [master/2113c16e5528:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/MasterData/WALs/2113c16e5528,38761,1733358456491/2113c16e5528%2C38761%2C1733358456491.1733358456620 2024-12-05T00:27:36,626 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35401:35401),(127.0.0.1/127.0.0.1:41389:41389)] 2024-12-05T00:27:36,626 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-05T00:27:36,626 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:27:36,626 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:27:36,626 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:27:36,627 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:27:36,629 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-05T00:27:36,629 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:27:36,629 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:27:36,629 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:27:36,630 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-05T00:27:36,630 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:27:36,630 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:27:36,630 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:27:36,631 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-05T00:27:36,631 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:27:36,632 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:27:36,632 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:27:36,633 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-05T00:27:36,633 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:27:36,633 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:27:36,633 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:27:36,634 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:27:36,634 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:27:36,635 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:27:36,635 DEBUG [master/2113c16e5528:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:27:36,636 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T00:27:36,637 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:27:36,638 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T00:27:36,639 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=824619, jitterRate=0.04855729639530182}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T00:27:36,639 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733358456626Initializing all the Stores at 1733358456627 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358456627Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358456627Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358456627Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358456627Cleaning up temporary data from old regions at 1733358456635 (+8 ms)Region opened successfully at 1733358456639 (+4 ms) 2024-12-05T00:27:36,640 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-05T00:27:36,642 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21afea31, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2113c16e5528/172.17.0.2:0 2024-12-05T00:27:36,643 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-05T00:27:36,643 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-05T00:27:36,643 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-05T00:27:36,643 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-05T00:27:36,644 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-05T00:27:36,644 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-05T00:27:36,644 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-05T00:27:36,646 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-05T00:27:36,647 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-05T00:27:36,648 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-05T00:27:36,648 INFO [master/2113c16e5528:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-05T00:27:36,649 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-05T00:27:36,651 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-05T00:27:36,651 INFO [master/2113c16e5528:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-05T00:27:36,652 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-05T00:27:36,653 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-05T00:27:36,654 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-05T00:27:36,655 DEBUG 
[master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-05T00:27:36,657 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-05T00:27:36,658 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-05T00:27:36,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T00:27:36,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45497-0x1018004f92e0001, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T00:27:36,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:27:36,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45497-0x1018004f92e0001, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:27:36,661 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=2113c16e5528,38761,1733358456491, sessionid=0x1018004f92e0000, setting cluster-up flag (Was=false) 2024-12-05T00:27:36,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45497-0x1018004f92e0001, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:27:36,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:27:36,669 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-05T00:27:36,670 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2113c16e5528,38761,1733358456491 2024-12-05T00:27:36,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45497-0x1018004f92e0001, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:27:36,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:27:36,679 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-05T00:27:36,680 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2113c16e5528,38761,1733358456491 2024-12-05T00:27:36,681 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-05T00:27:36,682 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-05T00:27:36,682 INFO [master/2113c16e5528:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-05T00:27:36,682 INFO [master/2113c16e5528:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-05T00:27:36,683 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 2113c16e5528,38761,1733358456491 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-05T00:27:36,683 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:27:36,683 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:27:36,684 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:27:36,684 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:27:36,684 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/2113c16e5528:0, corePoolSize=10, maxPoolSize=10 2024-12-05T00:27:36,684 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:27:36,684 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/2113c16e5528:0, corePoolSize=2, maxPoolSize=2 2024-12-05T00:27:36,684 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/2113c16e5528:0, corePoolSize=1, 
maxPoolSize=1 2024-12-05T00:27:36,684 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733358486684 2024-12-05T00:27:36,685 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-05T00:27:36,685 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-05T00:27:36,685 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-05T00:27:36,685 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-05T00:27:36,685 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-05T00:27:36,685 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-05T00:27:36,685 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:27:36,685 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-05T00:27:36,685 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
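The repeated CompactionConfiguration lines above echo the stock compaction settings (minFilesToCompact=3, maxFilesToCompact=10, ratio=1.2, off-peak ratio=5.0). For reference, a sketch of the standard configuration keys those values come from; the values set here simply restate the defaults already visible in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);               // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);         // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // off-peak ratio
    System.out.println(conf.get("hbase.hstore.compaction.ratio"));
  }
}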
2024-12-05T00:27:36,686 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-05T00:27:36,686 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-05T00:27:36,686 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-05T00:27:36,686 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:27:36,686 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-05T00:27:36,686 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-05T00:27:36,686 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-05T00:27:36,686 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.large.0-1733358456686,5,FailOnTimeoutGroup] 2024-12-05T00:27:36,686 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.small.0-1733358456686,5,FailOnTimeoutGroup] 2024-12-05T00:27:36,687 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T00:27:36,687 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-05T00:27:36,687 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-05T00:27:36,687 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-05T00:27:36,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741831_1007 (size=1321) 2024-12-05T00:27:36,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741831_1007 (size=1321) 2024-12-05T00:27:36,696 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-05T00:27:36,696 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30 2024-12-05T00:27:36,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741832_1008 (size=32) 2024-12-05T00:27:36,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741832_1008 (size=32) 2024-12-05T00:27:36,702 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:27:36,703 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T00:27:36,704 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T00:27:36,705 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:27:36,705 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:27:36,705 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T00:27:36,706 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T00:27:36,706 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:27:36,706 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:27:36,707 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T00:27:36,707 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T00:27:36,708 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:27:36,708 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:27:36,708 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T00:27:36,709 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T00:27:36,709 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:27:36,709 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:27:36,709 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T00:27:36,710 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/hbase/meta/1588230740 2024-12-05T00:27:36,710 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/hbase/meta/1588230740 2024-12-05T00:27:36,711 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T00:27:36,711 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T00:27:36,711 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-05T00:27:36,712 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T00:27:36,714 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T00:27:36,715 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=798031, jitterRate=0.014749303460121155}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-05T00:27:36,715 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733358456702Initializing all the Stores at 1733358456703 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358456703Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358456703Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358456703Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358456703Cleaning up temporary data from old regions at 1733358456711 (+8 ms)Region opened successfully at 1733358456715 (+4 ms) 2024-12-05T00:27:36,715 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T00:27:36,715 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T00:27:36,715 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T00:27:36,715 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T00:27:36,715 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T00:27:36,716 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T00:27:36,716 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733358456715Disabling compacts and flushes for region at 
1733358456715Disabling writes for close at 1733358456715Writing region close event to WAL at 1733358456716 (+1 ms)Closed at 1733358456716 2024-12-05T00:27:36,717 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:27:36,717 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-05T00:27:36,717 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-05T00:27:36,719 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T00:27:36,720 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-05T00:27:36,754 INFO [RS:0;2113c16e5528:45497 {}] regionserver.HRegionServer(746): ClusterId : 52b36340-9c5e-4752-916e-54e40f8c552b 2024-12-05T00:27:36,754 DEBUG [RS:0;2113c16e5528:45497 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T00:27:36,756 DEBUG [RS:0;2113c16e5528:45497 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T00:27:36,756 DEBUG [RS:0;2113c16e5528:45497 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T00:27:36,759 DEBUG [RS:0;2113c16e5528:45497 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T00:27:36,759 DEBUG [RS:0;2113c16e5528:45497 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ab1455a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2113c16e5528/172.17.0.2:0 2024-12-05T00:27:36,771 DEBUG [RS:0;2113c16e5528:45497 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;2113c16e5528:45497 2024-12-05T00:27:36,771 INFO [RS:0;2113c16e5528:45497 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T00:27:36,771 INFO [RS:0;2113c16e5528:45497 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T00:27:36,771 DEBUG [RS:0;2113c16e5528:45497 {}] regionserver.HRegionServer(832): About to register with Master. 
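Note: the StoreOpener entries above report the same CompactionConfiguration for every column family of hbase:meta: minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, a 7-day major compaction period with 0.5 jitter. These are cluster-level settings rather than anything in the table descriptor. A minimal sketch of how equivalent values could be set programmatically, assuming the commonly documented hbase.hstore.compaction.* / hbase.hregion.majorcompaction* property names (the names are not read from this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuning {
      static Configuration compactionDefaults() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact in the log
        conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        conf.setLong("hbase.hregion.majorcompaction", 7L * 24 * 60 * 60 * 1000); // 604800000 ms major period
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        return conf;
      }
    }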
2024-12-05T00:27:36,772 INFO [RS:0;2113c16e5528:45497 {}] regionserver.HRegionServer(2659): reportForDuty to master=2113c16e5528,38761,1733358456491 with port=45497, startcode=1733358456539 2024-12-05T00:27:36,772 DEBUG [RS:0;2113c16e5528:45497 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T00:27:36,774 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42447, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T00:27:36,774 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38761 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2113c16e5528,45497,1733358456539 2024-12-05T00:27:36,774 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38761 {}] master.ServerManager(517): Registering regionserver=2113c16e5528,45497,1733358456539 2024-12-05T00:27:36,776 DEBUG [RS:0;2113c16e5528:45497 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30 2024-12-05T00:27:36,776 DEBUG [RS:0;2113c16e5528:45497 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45045 2024-12-05T00:27:36,776 DEBUG [RS:0;2113c16e5528:45497 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T00:27:36,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T00:27:36,778 DEBUG [RS:0;2113c16e5528:45497 {}] zookeeper.ZKUtil(111): regionserver:45497-0x1018004f92e0001, quorum=127.0.0.1:58134, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2113c16e5528,45497,1733358456539 2024-12-05T00:27:36,778 WARN [RS:0;2113c16e5528:45497 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T00:27:36,778 INFO [RS:0;2113c16e5528:45497 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T00:27:36,778 DEBUG [RS:0;2113c16e5528:45497 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/WALs/2113c16e5528,45497,1733358456539 2024-12-05T00:27:36,778 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2113c16e5528,45497,1733358456539] 2024-12-05T00:27:36,781 INFO [RS:0;2113c16e5528:45497 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T00:27:36,782 INFO [RS:0;2113c16e5528:45497 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T00:27:36,782 INFO [RS:0;2113c16e5528:45497 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T00:27:36,782 INFO [RS:0;2113c16e5528:45497 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
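Note: the MemStoreFlusher line above (globalMemStoreLimit=880 M, low mark 836 M) and the PressureAwareCompactionThroughputController line (100 MB/s upper bound, 50 MB/s lower bound, 60000 ms tuning period) are both driven by region-server configuration, not per-table settings. A hedged sketch of the corresponding knobs, assuming the usual property names rather than anything dumped in this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RegionServerTuning {
      static Configuration memoryAndThroughput() {
        Configuration conf = HBaseConfiguration.create();
        // The global memstore limit is a fraction of the RS heap; the 880 M / 836 M pair in the
        // log is this fraction (and its lower-limit factor) applied to the test JVM heap.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        // Compaction throughput bounds consumed by PressureAwareCompactionThroughputController.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        return conf;
      }
    }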
2024-12-05T00:27:36,783 INFO [RS:0;2113c16e5528:45497 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T00:27:36,783 INFO [RS:0;2113c16e5528:45497 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T00:27:36,783 INFO [RS:0;2113c16e5528:45497 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-05T00:27:36,784 DEBUG [RS:0;2113c16e5528:45497 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:27:36,784 DEBUG [RS:0;2113c16e5528:45497 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:27:36,784 DEBUG [RS:0;2113c16e5528:45497 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:27:36,784 DEBUG [RS:0;2113c16e5528:45497 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:27:36,784 DEBUG [RS:0;2113c16e5528:45497 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:27:36,784 DEBUG [RS:0;2113c16e5528:45497 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2113c16e5528:0, corePoolSize=2, maxPoolSize=2 2024-12-05T00:27:36,784 DEBUG [RS:0;2113c16e5528:45497 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:27:36,784 DEBUG [RS:0;2113c16e5528:45497 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:27:36,784 DEBUG [RS:0;2113c16e5528:45497 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:27:36,784 DEBUG [RS:0;2113c16e5528:45497 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:27:36,784 DEBUG [RS:0;2113c16e5528:45497 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:27:36,784 DEBUG [RS:0;2113c16e5528:45497 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:27:36,784 DEBUG [RS:0;2113c16e5528:45497 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:27:36,784 DEBUG [RS:0;2113c16e5528:45497 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:27:36,784 INFO [RS:0;2113c16e5528:45497 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
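Note: the repeated "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" entries come from HBase's ChoreService. ChoreService and ScheduledChore are internal server-side classes rather than public client API, so the following is only an illustration of the pattern the log reports, under that assumption:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("demo");
        // Runs every 1000 ms, like the CompactionChecker chore above.
        ScheduledChore checker = new ScheduledChore("DemoChecker", stopper, 1000, 0, TimeUnit.MILLISECONDS) {
          @Override protected void chore() {
            System.out.println("periodic check");
          }
        };
        service.scheduleChore(checker);
        Thread.sleep(5000);   // let the chore fire a few times before shutting down
        service.shutdown();
      }
    }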
2024-12-05T00:27:36,784 INFO [RS:0;2113c16e5528:45497 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T00:27:36,784 INFO [RS:0;2113c16e5528:45497 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:27:36,784 INFO [RS:0;2113c16e5528:45497 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T00:27:36,784 INFO [RS:0;2113c16e5528:45497 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T00:27:36,784 INFO [RS:0;2113c16e5528:45497 {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,45497,1733358456539-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T00:27:36,799 INFO [RS:0;2113c16e5528:45497 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T00:27:36,799 INFO [RS:0;2113c16e5528:45497 {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,45497,1733358456539-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:27:36,799 INFO [RS:0;2113c16e5528:45497 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:27:36,799 INFO [RS:0;2113c16e5528:45497 {}] regionserver.Replication(171): 2113c16e5528,45497,1733358456539 started 2024-12-05T00:27:36,812 INFO [RS:0;2113c16e5528:45497 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:27:36,813 INFO [RS:0;2113c16e5528:45497 {}] regionserver.HRegionServer(1482): Serving as 2113c16e5528,45497,1733358456539, RpcServer on 2113c16e5528/172.17.0.2:45497, sessionid=0x1018004f92e0001 2024-12-05T00:27:36,813 DEBUG [RS:0;2113c16e5528:45497 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T00:27:36,813 DEBUG [RS:0;2113c16e5528:45497 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2113c16e5528,45497,1733358456539 2024-12-05T00:27:36,813 DEBUG [RS:0;2113c16e5528:45497 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2113c16e5528,45497,1733358456539' 2024-12-05T00:27:36,813 DEBUG [RS:0;2113c16e5528:45497 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T00:27:36,813 DEBUG [RS:0;2113c16e5528:45497 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T00:27:36,814 DEBUG [RS:0;2113c16e5528:45497 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T00:27:36,814 DEBUG [RS:0;2113c16e5528:45497 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T00:27:36,814 DEBUG [RS:0;2113c16e5528:45497 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2113c16e5528,45497,1733358456539 2024-12-05T00:27:36,814 DEBUG [RS:0;2113c16e5528:45497 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2113c16e5528,45497,1733358456539' 2024-12-05T00:27:36,814 DEBUG [RS:0;2113c16e5528:45497 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T00:27:36,814 DEBUG 
[RS:0;2113c16e5528:45497 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T00:27:36,814 DEBUG [RS:0;2113c16e5528:45497 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T00:27:36,814 INFO [RS:0;2113c16e5528:45497 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T00:27:36,814 INFO [RS:0;2113c16e5528:45497 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T00:27:36,870 WARN [2113c16e5528:38761 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-05T00:27:36,916 INFO [RS:0;2113c16e5528:45497 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2113c16e5528%2C45497%2C1733358456539, suffix=, logDir=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/WALs/2113c16e5528,45497,1733358456539, archiveDir=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/oldWALs, maxLogs=32 2024-12-05T00:27:36,917 INFO [RS:0;2113c16e5528:45497 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C45497%2C1733358456539.1733358456916 2024-12-05T00:27:36,922 INFO [RS:0;2113c16e5528:45497 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/WALs/2113c16e5528,45497,1733358456539/2113c16e5528%2C45497%2C1733358456539.1733358456916 2024-12-05T00:27:36,923 DEBUG [RS:0;2113c16e5528:45497 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35401:35401),(127.0.0.1/127.0.0.1:41389:41389)] 2024-12-05T00:27:37,120 DEBUG [2113c16e5528:38761 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-05T00:27:37,121 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=2113c16e5528,45497,1733358456539 2024-12-05T00:27:37,122 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2113c16e5528,45497,1733358456539, state=OPENING 2024-12-05T00:27:37,123 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-05T00:27:37,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:27:37,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45497-0x1018004f92e0001, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:27:37,125 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:27:37,125 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:27:37,125 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T00:27:37,126 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=2113c16e5528,45497,1733358456539}] 2024-12-05T00:27:37,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:37,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:37,278 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T00:27:37,280 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51207, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T00:27:37,284 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-05T00:27:37,284 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T00:27:37,285 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2113c16e5528%2C45497%2C1733358456539.meta, suffix=.meta, logDir=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/WALs/2113c16e5528,45497,1733358456539, archiveDir=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/oldWALs, maxLogs=32 2024-12-05T00:27:37,286 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C45497%2C1733358456539.meta.1733358457286.meta 2024-12-05T00:27:37,290 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/WALs/2113c16e5528,45497,1733358456539/2113c16e5528%2C45497%2C1733358456539.meta.1733358457286.meta 2024-12-05T00:27:37,293 DEBUG 
[RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41389:41389),(127.0.0.1/127.0.0.1:35401:35401)] 2024-12-05T00:27:37,297 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-05T00:27:37,297 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-05T00:27:37,297 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-05T00:27:37,297 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-05T00:27:37,297 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-05T00:27:37,297 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:27:37,297 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-05T00:27:37,297 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-05T00:27:37,298 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T00:27:37,299 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T00:27:37,299 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:27:37,299 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:27:37,300 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T00:27:37,300 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T00:27:37,300 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:27:37,301 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:27:37,301 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T00:27:37,301 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T00:27:37,301 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:27:37,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:27:37,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T00:27:37,302 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T00:27:37,302 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:27:37,303 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:27:37,303 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T00:27:37,303 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/hbase/meta/1588230740 2024-12-05T00:27:37,304 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/hbase/meta/1588230740 2024-12-05T00:27:37,306 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T00:27:37,306 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T00:27:37,306 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
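Note: the FlushLargeStoresPolicy message above says hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the hbase:meta descriptor, so the policy falls back to the memstore flush size divided by the number of families (16.0 M here). For a user table the property can be placed on the table descriptor; a minimal sketch with a hypothetical table name (only the property key and the 16 MB value come from the log):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushLowerBoundExample {
      static TableDescriptor withPerFamilyFlushLowerBound() {
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example"))   // hypothetical table, not from this log
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            // Same 16 MB value the log computes as a fallback.
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                      String.valueOf(16L * 1024 * 1024))
            .build();
      }
    }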
2024-12-05T00:27:37,307 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T00:27:37,308 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=764906, jitterRate=-0.02737198770046234}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-05T00:27:37,308 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-05T00:27:37,308 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733358457297Writing region info on filesystem at 1733358457297Initializing all the Stores at 1733358457298 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358457298Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358457298Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358457298Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358457298Cleaning up temporary data from old regions at 1733358457306 (+8 ms)Running coprocessor post-open hooks at 1733358457308 (+2 ms)Region opened successfully at 1733358457308 2024-12-05T00:27:37,309 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733358457278 2024-12-05T00:27:37,312 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-05T00:27:37,312 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-05T00:27:37,313 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=2113c16e5528,45497,1733358456539 2024-12-05T00:27:37,313 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2113c16e5528,45497,1733358456539, state=OPEN 2024-12-05T00:27:37,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45497-0x1018004f92e0001, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T00:27:37,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T00:27:37,325 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=2113c16e5528,45497,1733358456539 2024-12-05T00:27:37,325 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:27:37,325 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:27:37,328 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-05T00:27:37,328 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=2113c16e5528,45497,1733358456539 in 199 msec 2024-12-05T00:27:37,330 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-05T00:27:37,330 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 611 msec 2024-12-05T00:27:37,331 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:27:37,331 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-05T00:27:37,332 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T00:27:37,332 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,45497,1733358456539, seqNum=-1] 2024-12-05T00:27:37,332 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T00:27:37,333 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56773, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T00:27:37,338 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 656 msec 2024-12-05T00:27:37,338 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733358457338, completionTime=-1 2024-12-05T00:27:37,338 INFO 
[master/2113c16e5528:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-05T00:27:37,338 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-05T00:27:37,340 INFO [master/2113c16e5528:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-05T00:27:37,340 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733358517340 2024-12-05T00:27:37,340 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733358577340 2024-12-05T00:27:37,340 INFO [master/2113c16e5528:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-05T00:27:37,341 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,38761,1733358456491-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:27:37,341 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,38761,1733358456491-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:27:37,341 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,38761,1733358456491-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:27:37,341 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-2113c16e5528:38761, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:27:37,341 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-05T00:27:37,342 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-05T00:27:37,343 DEBUG [master/2113c16e5528:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-05T00:27:37,345 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.773sec 2024-12-05T00:27:37,345 INFO [master/2113c16e5528:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-05T00:27:37,345 INFO [master/2113c16e5528:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-05T00:27:37,345 INFO [master/2113c16e5528:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-05T00:27:37,345 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
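Note: at this point the master reports "Master has completed initialization" and starts its periodic chores. A small client-side check of that state using the public Admin API (connection settings are assumed to come from hbase-site.xml on the classpath):

    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterCheck {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          System.out.println("active master: " + metrics.getMasterName());
          System.out.println("live regionservers: " + metrics.getLiveServerMetrics().size());
          System.out.println("balancer enabled: " + admin.isBalancerEnabled());
        }
      }
    }

The balancer check is relevant here because the test immediately switches the balancer off (the "set balanceSwitch=false" entry further down).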
2024-12-05T00:27:37,345 INFO [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-05T00:27:37,345 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,38761,1733358456491-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T00:27:37,345 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,38761,1733358456491-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-05T00:27:37,347 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-05T00:27:37,347 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-05T00:27:37,347 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,38761,1733358456491-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:27:37,354 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49e87d3f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:27:37,354 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,38761,-1 for getting cluster id 2024-12-05T00:27:37,354 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T00:27:37,356 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '52b36340-9c5e-4752-916e-54e40f8c552b' 2024-12-05T00:27:37,356 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T00:27:37,356 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "52b36340-9c5e-4752-916e-54e40f8c552b" 2024-12-05T00:27:37,356 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38bc234f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:27:37,356 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,38761,-1] 2024-12-05T00:27:37,356 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T00:27:37,356 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:27:37,357 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42756, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T00:27:37,358 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ab86f9f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:27:37,358 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T00:27:37,359 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,45497,1733358456539, seqNum=-1] 2024-12-05T00:27:37,359 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T00:27:37,360 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58192, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T00:27:37,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=2113c16e5528,38761,1733358456491 2024-12-05T00:27:37,362 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:27:37,364 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-05T00:27:37,364 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-05T00:27:37,365 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 2113c16e5528,38761,1733358456491 2024-12-05T00:27:37,365 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@34648aa 2024-12-05T00:27:37,365 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-05T00:27:37,366 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42766, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-05T00:27:37,366 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38761 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-05T00:27:37,366 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38761 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
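Note: the two TableDescriptorChecker warnings above fire because the test runs with a very small maximum file size (786432 bytes) and memstore flush size (8192 bytes), set either on the descriptor or via hbase.hregion.max.filesize / hbase.hregion.memstore.flush.size, so that flushes, WAL rolls and splits happen quickly. A sketch of a descriptor that would trigger exactly these warnings, using the public TableDescriptorBuilder API (the exact way the test sets these values is not shown in the log):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RollingTableDescriptor {
      static TableDescriptor smallFlushTable() {
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(1)          // VERSIONS => '1' in the create request below
                .build())
            .setMaxFileSize(786432L)        // triggers the MAX_FILESIZE warning
            .setMemStoreFlushSize(8192L)    // triggers the MEMSTORE_FLUSHSIZE warning
            .build();
      }
    }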
2024-12-05T00:27:37,367 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38761 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-05T00:27:37,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38761 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling
2024-12-05T00:27:37,369 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION
2024-12-05T00:27:37,369 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-05T00:27:37,369 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38761 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4
2024-12-05T00:27:37,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38761 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-05T00:27:37,370 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-05T00:27:37,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741835_1011 (size=381)
2024-12-05T00:27:37,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741835_1011 (size=381)
2024-12-05T00:27:37,378 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => aac3f55253ff6dfc9aa5bff573ffec89, NAME => 'TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30
2024-12-05T00:27:37,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741836_1012 (size=64)
2024-12-05T00:27:37,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741836_1012 (size=64)
2024-12-05T00:27:37,385 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-05T00:27:37,385 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing aac3f55253ff6dfc9aa5bff573ffec89, disabling compactions & flushes
2024-12-05T00:27:37,385 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89.
2024-12-05T00:27:37,385 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89.
2024-12-05T00:27:37,385 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89. after waiting 0 ms
2024-12-05T00:27:37,385 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89.
2024-12-05T00:27:37,385 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89.
2024-12-05T00:27:37,385 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for aac3f55253ff6dfc9aa5bff573ffec89: Waiting for close lock at 1733358457385Disabling compacts and flushes for region at 1733358457385Disabling writes for close at 1733358457385Writing region close event to WAL at 1733358457385Closed at 1733358457385
2024-12-05T00:27:37,387 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META
2024-12-05T00:27:37,387 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733358457387"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733358457387"}]},"ts":"1733358457387"}
2024-12-05T00:27:37,389 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta.
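The create request logged above ('TestLogRolling-testLogRolling' with a single 'info' family, all attributes at their defaults) is what the client-side Admin API sends to the master. A rough Java equivalent is sketched below, assuming a connection to this mini-cluster; only the table and family names come from the log, the rest is ordinary client boilerplate.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTestTable {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Single 'info' family with the defaults shown in the descriptor above
      // (VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE 64 KB, no compression).
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          .build();
      admin.createTable(desc); // drives the CreateTableProcedure (pid=4) recorded above
    }
  }
}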
2024-12-05T00:27:37,390 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-05T00:27:37,390 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733358457390"}]},"ts":"1733358457390"}
2024-12-05T00:27:37,392 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta
2024-12-05T00:27:37,392 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=aac3f55253ff6dfc9aa5bff573ffec89, ASSIGN}]
2024-12-05T00:27:37,393 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=aac3f55253ff6dfc9aa5bff573ffec89, ASSIGN
2024-12-05T00:27:37,394 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=aac3f55253ff6dfc9aa5bff573ffec89, ASSIGN; state=OFFLINE, location=2113c16e5528,45497,1733358456539; forceNewPlan=false, retain=false
2024-12-05T00:27:37,545 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=aac3f55253ff6dfc9aa5bff573ffec89, regionState=OPENING, regionLocation=2113c16e5528,45497,1733358456539
2024-12-05T00:27:37,547 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=aac3f55253ff6dfc9aa5bff573ffec89, ASSIGN because future has completed
2024-12-05T00:27:37,548 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure aac3f55253ff6dfc9aa5bff573ffec89, server=2113c16e5528,45497,1733358456539}]
2024-12-05T00:27:37,704 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89.
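Once the master has queued the assignment sub-procedures above (pid=5 TransitRegionStateProcedure and pid=6 OpenRegionProcedure), a client can confirm the result and resolve where the single region landed. A hedged sketch of that check is below; the polling loop and method name are illustrative, only the table name and the expected server (2113c16e5528,45497) come from the log.

import java.io.IOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

// Sketch: wait until every region of the table is open, then look up its location.
static void waitForAssignment(Connection conn) throws IOException, InterruptedException {
  TableName tn = TableName.valueOf("TestLogRolling-testLogRolling");
  try (Admin admin = conn.getAdmin()) {
    while (!admin.isTableAvailable(tn)) {
      Thread.sleep(100); // assignment above took a few hundred milliseconds
    }
  }
  try (RegionLocator locator = conn.getRegionLocator(tn)) {
    // Should resolve to the server chosen by TransitRegionStateProcedure, e.g. 2113c16e5528,45497 above.
    HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
    System.out.println("Region " + loc.getRegion().getEncodedName() + " is on " + loc.getServerName());
  }
}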
2024-12-05T00:27:37,705 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => aac3f55253ff6dfc9aa5bff573ffec89, NAME => 'TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89.', STARTKEY => '', ENDKEY => ''}
2024-12-05T00:27:37,705 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling aac3f55253ff6dfc9aa5bff573ffec89
2024-12-05T00:27:37,705 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-05T00:27:37,705 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for aac3f55253ff6dfc9aa5bff573ffec89
2024-12-05T00:27:37,705 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for aac3f55253ff6dfc9aa5bff573ffec89
2024-12-05T00:27:37,706 INFO [StoreOpener-aac3f55253ff6dfc9aa5bff573ffec89-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region aac3f55253ff6dfc9aa5bff573ffec89
2024-12-05T00:27:37,707 INFO [StoreOpener-aac3f55253ff6dfc9aa5bff573ffec89-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region aac3f55253ff6dfc9aa5bff573ffec89 columnFamilyName info
2024-12-05T00:27:37,707 DEBUG [StoreOpener-aac3f55253ff6dfc9aa5bff573ffec89-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-05T00:27:37,708 INFO [StoreOpener-aac3f55253ff6dfc9aa5bff573ffec89-1 {}] regionserver.HStore(327): Store=aac3f55253ff6dfc9aa5bff573ffec89/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-05T00:27:37,708 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for aac3f55253ff6dfc9aa5bff573ffec89
2024-12-05T00:27:37,709 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89
2024-12-05T00:27:37,709 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89
2024-12-05T00:27:37,709 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for aac3f55253ff6dfc9aa5bff573ffec89
2024-12-05T00:27:37,709 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for aac3f55253ff6dfc9aa5bff573ffec89
2024-12-05T00:27:37,710 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for aac3f55253ff6dfc9aa5bff573ffec89
2024-12-05T00:27:37,712 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-05T00:27:37,712 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened aac3f55253ff6dfc9aa5bff573ffec89; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=818741, jitterRate=0.041084155440330505}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-05T00:27:37,712 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for aac3f55253ff6dfc9aa5bff573ffec89
2024-12-05T00:27:37,713 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for aac3f55253ff6dfc9aa5bff573ffec89: Running coprocessor pre-open hook at 1733358457705Writing region info on filesystem at 1733358457705Initializing all the Stores at 1733358457706 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358457706Cleaning up temporary data from old regions at 1733358457709 (+3 ms)Running coprocessor post-open hooks at 1733358457713 (+4 ms)Region opened successfully at 1733358457713
2024-12-05T00:27:37,714 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89., pid=6, masterSystemTime=1733358457700
2024-12-05T00:27:37,716 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89.
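The region is now open under a very small split threshold (desiredMaxFileSize=818741, i.e. the configured 786432-byte limit with jitter applied), so even a modest write volume will trigger memstore flushes, WAL rolls and eventually a split. A sketch of the kind of writes a log-rolling test would issue next is below; the row keys, qualifier and payload size are invented, only the table and family names come from the log.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: push enough cells through the 'info' family to grow the WAL quickly.
static void writeSomeRows(Connection conn) throws IOException {
  try (Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
    byte[] family = Bytes.toBytes("info");   // the only column family created above
    byte[] qualifier = Bytes.toBytes("q");   // invented qualifier
    byte[] value = new byte[1024];           // 1 KB payload per cell (invented)
    for (int i = 0; i < 256; i++) {
      Put put = new Put(Bytes.toBytes(String.format("row-%05d", i)));
      put.addColumn(family, qualifier, value);
      table.put(put);                        // each put is appended to the region server's WAL
    }
  }
}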
2024-12-05T00:27:37,716 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89.
2024-12-05T00:27:37,717 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=aac3f55253ff6dfc9aa5bff573ffec89, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,45497,1733358456539
2024-12-05T00:27:37,719 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure aac3f55253ff6dfc9aa5bff573ffec89, server=2113c16e5528,45497,1733358456539 because future has completed
2024-12-05T00:27:37,722 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5
2024-12-05T00:27:37,722 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure aac3f55253ff6dfc9aa5bff573ffec89, server=2113c16e5528,45497,1733358456539 in 172 msec
2024-12-05T00:27:37,724 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4
2024-12-05T00:27:37,724 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=aac3f55253ff6dfc9aa5bff573ffec89, ASSIGN in 330 msec
2024-12-05T00:27:37,725 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-05T00:27:37,726 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733358457725"}]},"ts":"1733358457725"}
2024-12-05T00:27:37,727 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta
2024-12-05T00:27:37,729 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION
2024-12-05T00:27:37,731 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 362 msec
2024-12-05T00:27:38,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:38,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:39,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:39,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:40,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:40,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:40,342 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,342 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,342 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,342 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,342 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,342 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,343 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,343 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,357 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,358 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,358 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,358 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,358 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,358 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,361 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,361 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,361 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,363 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,867 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-05T00:27:40,868 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,868 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,868 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,868 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,868 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,868 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,869 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,869 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,888 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,889 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,889 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,889 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,889 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,889 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,892 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,892 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,893 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:40,895 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:41,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:41,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:42,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-05T00:27:42,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:42,781 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-05T00:27:42,781 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-12-05T00:27:43,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:43,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:44,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:44,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:44,743 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-05T00:27:44,743 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-05T00:27:44,744 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-05T00:27:45,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:45,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:46,236 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:46,236 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:47,236 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:47,236 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-05T00:27:47,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38761 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T00:27:47,461 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-12-05T00:27:47,461 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-12-05T00:27:47,464 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-12-05T00:27:47,464 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89. 2024-12-05T00:27:47,466 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89., hostname=2113c16e5528,45497,1733358456539, seqNum=2] 2024-12-05T00:27:47,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45497 {}] regionserver.HRegion(8855): Flush requested on aac3f55253ff6dfc9aa5bff573ffec89 2024-12-05T00:27:47,478 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing aac3f55253ff6dfc9aa5bff573ffec89 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-05T00:27:47,495 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp/info/7e30f5b4090f42febe7fee906e320fad is 1080, key is row0001/info:/1733358467467/Put/seqid=0 2024-12-05T00:27:47,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741837_1013 (size=12509) 2024-12-05T00:27:47,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741837_1013 (size=12509) 2024-12-05T00:27:47,502 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp/info/7e30f5b4090f42febe7fee906e320fad 2024-12-05T00:27:47,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp/info/7e30f5b4090f42febe7fee906e320fad as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/7e30f5b4090f42febe7fee906e320fad 2024-12-05T00:27:47,515 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/7e30f5b4090f42febe7fee906e320fad, entries=7, sequenceid=11, filesize=12.2 K 
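Note on the flush entries just above: the MemStoreFlusher first writes the new store file under the region's .tmp directory and only afterwards commits it into the info/ family directory ("Committing .../.tmp/info/<file> as .../info/<file>"), so readers never observe a half-written file. The sketch below illustrates that write-then-rename pattern with the plain Hadoop FileSystem API only; the class name, paths, and payload are made up for illustration and are not part of the test code.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/**
 * Minimal sketch (not HBase code) of the two-step commit seen in the log:
 * write the new file under ".tmp", then rename it into the family directory.
 */
public class TmpThenCommitSketch {

  static Path writeAndCommit(FileSystem fs, Path regionDir, String family,
      String fileName, byte[] payload) throws IOException {
    Path tmpFile = new Path(new Path(regionDir, ".tmp"), new Path(family, fileName));
    Path finalFile = new Path(new Path(regionDir, family), fileName);

    // 1) Write the data where readers cannot see it yet.
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.write(payload);
    }

    // 2) Publish it with a rename, mirroring
    //    "Committing .../.tmp/info/<file> as .../info/<file>".
    fs.mkdirs(finalFile.getParent());
    if (!fs.rename(tmpFile, finalFile)) {
      throw new IOException("rename failed: " + tmpFile + " -> " + finalFile);
    }
    return finalFile;
  }

  public static void main(String[] args) throws IOException {
    // Demo against the local filesystem with hypothetical paths.
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path committed = writeAndCommit(fs, new Path("/tmp/sketch-region"), "info",
        "example-storefile", new byte[] {1, 2, 3});
    System.out.println("committed to " + committed);
  }
}
```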
2024-12-05T00:27:47,516 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for aac3f55253ff6dfc9aa5bff573ffec89 in 38ms, sequenceid=11, compaction requested=false 2024-12-05T00:27:47,516 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for aac3f55253ff6dfc9aa5bff573ffec89: 2024-12-05T00:27:47,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45497 {}] regionserver.HRegion(8855): Flush requested on aac3f55253ff6dfc9aa5bff573ffec89 2024-12-05T00:27:47,518 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing aac3f55253ff6dfc9aa5bff573ffec89 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-05T00:27:47,523 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp/info/96ea172a18094be6b34229b2f4b86d59 is 1080, key is row0008/info:/1733358467479/Put/seqid=0 2024-12-05T00:27:47,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741838_1014 (size=29761) 2024-12-05T00:27:47,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741838_1014 (size=29761) 2024-12-05T00:27:47,528 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp/info/96ea172a18094be6b34229b2f4b86d59 2024-12-05T00:27:47,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp/info/96ea172a18094be6b34229b2f4b86d59 as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/96ea172a18094be6b34229b2f4b86d59 2024-12-05T00:27:47,537 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/96ea172a18094be6b34229b2f4b86d59, entries=23, sequenceid=37, filesize=29.1 K 2024-12-05T00:27:47,538 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for aac3f55253ff6dfc9aa5bff573ffec89 in 20ms, sequenceid=37, compaction requested=false 2024-12-05T00:27:47,538 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for aac3f55253ff6dfc9aa5bff573ffec89: 2024-12-05T00:27:47,538 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-12-05T00:27:47,538 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:27:47,538 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/96ea172a18094be6b34229b2f4b86d59 because midkey is the same as first or last row 2024-12-05T00:27:48,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:48,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:49,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:49,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-05T00:27:49,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45497 {}] regionserver.HRegion(8855): Flush requested on aac3f55253ff6dfc9aa5bff573ffec89 2024-12-05T00:27:49,530 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing aac3f55253ff6dfc9aa5bff573ffec89 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-05T00:27:49,535 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp/info/3cd9ef399f944f2887857e37ce16522e is 1080, key is row0031/info:/1733358467519/Put/seqid=0 2024-12-05T00:27:49,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741839_1015 (size=12509) 2024-12-05T00:27:49,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741839_1015 (size=12509) 2024-12-05T00:27:49,540 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp/info/3cd9ef399f944f2887857e37ce16522e 2024-12-05T00:27:49,546 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp/info/3cd9ef399f944f2887857e37ce16522e as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/3cd9ef399f944f2887857e37ce16522e 2024-12-05T00:27:49,552 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/3cd9ef399f944f2887857e37ce16522e, entries=7, sequenceid=47, filesize=12.2 K 2024-12-05T00:27:49,553 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for aac3f55253ff6dfc9aa5bff573ffec89 in 23ms, sequenceid=47, compaction requested=true 2024-12-05T00:27:49,553 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for aac3f55253ff6dfc9aa5bff573ffec89: 2024-12-05T00:27:49,553 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-12-05T00:27:49,553 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:27:49,553 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/96ea172a18094be6b34229b2f4b86d59 because midkey is the same as first or last row 2024-12-05T00:27:49,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store aac3f55253ff6dfc9aa5bff573ffec89:info, priority=-2147483648, current under compaction store 
size is 1 2024-12-05T00:27:49,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-05T00:27:49,554 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-05T00:27:49,555 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-05T00:27:49,555 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HStore(1541): aac3f55253ff6dfc9aa5bff573ffec89/info is initiating minor compaction (all files) 2024-12-05T00:27:49,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45497 {}] regionserver.HRegion(8855): Flush requested on aac3f55253ff6dfc9aa5bff573ffec89 2024-12-05T00:27:49,555 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of aac3f55253ff6dfc9aa5bff573ffec89/info in TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89. 2024-12-05T00:27:49,555 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing aac3f55253ff6dfc9aa5bff573ffec89 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-12-05T00:27:49,555 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/7e30f5b4090f42febe7fee906e320fad, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/96ea172a18094be6b34229b2f4b86d59, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/3cd9ef399f944f2887857e37ce16522e] into tmpdir=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp, totalSize=53.5 K 2024-12-05T00:27:49,556 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7e30f5b4090f42febe7fee906e320fad, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733358467467 2024-12-05T00:27:49,556 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.Compactor(225): Compacting 96ea172a18094be6b34229b2f4b86d59, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733358467479 2024-12-05T00:27:49,557 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3cd9ef399f944f2887857e37ce16522e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733358467519 2024-12-05T00:27:49,559 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp/info/f5cd93a32b054b8aa9e8d3b3e5473b1b is 1080, key is row0038/info:/1733358469531/Put/seqid=0 
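Aside on the numbers in the compaction and split-policy entries above: the three store files reported earlier are 12509, 29761, and 12509 bytes, which is exactly the 54779 bytes ("totalSize=53.5 K") that the exploring compaction policy selects and that the split policy compares against sizeToCheck=16.0 K. A tiny worked check of that arithmetic, with the values copied from the log and the threshold hard-coded rather than read from any configuration:

```java
/** Recomputes the store sizes quoted in the log entries above. */
public class StoreSizeArithmetic {
  public static void main(String[] args) {
    // File sizes reported for blk_..._1013, _1014 and _1015 (bytes).
    long[] storeFileBytes = {12509L, 29761L, 12509L};

    long sum = 0L;
    for (long b : storeFileBytes) {
      sum += b;
    }
    double sumKib = sum / 1024.0;   // 54779 bytes -> 53.5 K
    double sizeToCheckKib = 16.0;   // "sizeToCheck=16.0 K" from the split-policy line

    System.out.printf("selected files total %d bytes (%.1f K)%n", sum, sumKib);
    System.out.println("size threshold exceeded? " + (sumKib > sizeToCheckKib)); // true
  }
}
```

The size check alone passing is why the policy logs "Should split", while the separate midkey check ("midkey is the same as first or last row") is what still blocks the split in these entries.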
2024-12-05T00:27:49,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741840_1016 (size=21141) 2024-12-05T00:27:49,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741840_1016 (size=21141) 2024-12-05T00:27:49,566 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp/info/f5cd93a32b054b8aa9e8d3b3e5473b1b 2024-12-05T00:27:49,571 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): aac3f55253ff6dfc9aa5bff573ffec89#info#compaction#58 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-05T00:27:49,572 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp/info/a2b696823f9840bfb9f58e1aac5e630c is 1080, key is row0001/info:/1733358467467/Put/seqid=0 2024-12-05T00:27:49,574 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp/info/f5cd93a32b054b8aa9e8d3b3e5473b1b as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/f5cd93a32b054b8aa9e8d3b3e5473b1b 2024-12-05T00:27:49,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741841_1017 (size=44978) 2024-12-05T00:27:49,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741841_1017 (size=44978) 2024-12-05T00:27:49,581 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/f5cd93a32b054b8aa9e8d3b3e5473b1b, entries=15, sequenceid=65, filesize=20.6 K 2024-12-05T00:27:49,582 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=12.61 KB/12912 for aac3f55253ff6dfc9aa5bff573ffec89 in 27ms, sequenceid=65, compaction requested=false 2024-12-05T00:27:49,582 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for aac3f55253ff6dfc9aa5bff573ffec89: 2024-12-05T00:27:49,582 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=74.1 K, sizeToCheck=16.0 K 2024-12-05T00:27:49,582 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:27:49,582 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/96ea172a18094be6b34229b2f4b86d59 because midkey is the same as first or last row 2024-12-05T00:27:49,586 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp/info/a2b696823f9840bfb9f58e1aac5e630c as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/a2b696823f9840bfb9f58e1aac5e630c 2024-12-05T00:27:49,591 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in aac3f55253ff6dfc9aa5bff573ffec89/info of aac3f55253ff6dfc9aa5bff573ffec89 into a2b696823f9840bfb9f58e1aac5e630c(size=43.9 K), total size for store is 64.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-05T00:27:49,591 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for aac3f55253ff6dfc9aa5bff573ffec89: 2024-12-05T00:27:49,591 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89., storeName=aac3f55253ff6dfc9aa5bff573ffec89/info, priority=13, startTime=1733358469553; duration=0sec 2024-12-05T00:27:49,592 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=64.6 K, sizeToCheck=16.0 K 2024-12-05T00:27:49,592 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:27:49,592 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/a2b696823f9840bfb9f58e1aac5e630c because midkey is the same as first or last row 2024-12-05T00:27:49,592 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=64.6 K, sizeToCheck=16.0 K 2024-12-05T00:27:49,592 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:27:49,592 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/a2b696823f9840bfb9f58e1aac5e630c because midkey is the same as first or last row 2024-12-05T00:27:49,592 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=64.6 K, sizeToCheck=16.0 K 2024-12-05T00:27:49,592 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:27:49,592 DEBUG 
[RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/a2b696823f9840bfb9f58e1aac5e630c because midkey is the same as first or last row 2024-12-05T00:27:49,592 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-05T00:27:49,592 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: aac3f55253ff6dfc9aa5bff573ffec89:info 2024-12-05T00:27:50,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:50,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:50,246 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-05T00:27:50,247 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:50,247 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:50,247 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:50,248 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:50,248 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:50,248 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:50,249 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:50,249 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:50,277 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:50,277 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:50,277 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:50,278 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:50,278 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:50,278 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:50,281 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:50,281 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:50,281 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:50,284 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:51,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:51,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-05T00:27:51,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45497 {}] regionserver.HRegion(8855): Flush requested on aac3f55253ff6dfc9aa5bff573ffec89 2024-12-05T00:27:51,582 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing aac3f55253ff6dfc9aa5bff573ffec89 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-12-05T00:27:51,586 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp/info/39d4a62c196a40a889ea67b6518e575d is 1080, key is row0053/info:/1733358469556/Put/seqid=0 2024-12-05T00:27:51,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741842_1018 (size=18987) 2024-12-05T00:27:51,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741842_1018 (size=18987) 2024-12-05T00:27:51,592 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp/info/39d4a62c196a40a889ea67b6518e575d 2024-12-05T00:27:51,598 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp/info/39d4a62c196a40a889ea67b6518e575d as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/39d4a62c196a40a889ea67b6518e575d 2024-12-05T00:27:51,603 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/39d4a62c196a40a889ea67b6518e575d, entries=13, sequenceid=82, filesize=18.5 K 2024-12-05T00:27:51,604 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=13.66 KB/13988 for aac3f55253ff6dfc9aa5bff573ffec89 in 22ms, sequenceid=82, compaction requested=true 2024-12-05T00:27:51,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for aac3f55253ff6dfc9aa5bff573ffec89: 2024-12-05T00:27:51,604 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-12-05T00:27:51,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45497 {}] regionserver.HRegion(8855): Flush requested on aac3f55253ff6dfc9aa5bff573ffec89 2024-12-05T00:27:51,604 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:27:51,604 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/a2b696823f9840bfb9f58e1aac5e630c because midkey is the same as first or last row 2024-12-05T00:27:51,605 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store aac3f55253ff6dfc9aa5bff573ffec89:info, priority=-2147483648, current under compaction store size is 1 2024-12-05T00:27:51,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-05T00:27:51,605 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-05T00:27:51,605 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing aac3f55253ff6dfc9aa5bff573ffec89 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-12-05T00:27:51,606 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-05T00:27:51,606 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HStore(1541): aac3f55253ff6dfc9aa5bff573ffec89/info is initiating minor compaction (all files) 2024-12-05T00:27:51,606 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of aac3f55253ff6dfc9aa5bff573ffec89/info in TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89. 2024-12-05T00:27:51,606 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/a2b696823f9840bfb9f58e1aac5e630c, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/f5cd93a32b054b8aa9e8d3b3e5473b1b, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/39d4a62c196a40a889ea67b6518e575d] into tmpdir=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp, totalSize=83.1 K 2024-12-05T00:27:51,607 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.Compactor(225): Compacting a2b696823f9840bfb9f58e1aac5e630c, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733358467467 2024-12-05T00:27:51,608 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.Compactor(225): Compacting f5cd93a32b054b8aa9e8d3b3e5473b1b, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=65, earliestPutTs=1733358469531 2024-12-05T00:27:51,608 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.Compactor(225): Compacting 39d4a62c196a40a889ea67b6518e575d, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733358469556 2024-12-05T00:27:51,609 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp/info/36320405a6e544068ff789ce51073ebc is 1080, key is row0066/info:/1733358471583/Put/seqid=0 
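The split-policy lines above reduce to a single size comparison: the summed size of the store's files (sumSize=83.1 K, i.e. the 85106 bytes of the three selected files) is checked against the configured threshold (sizeToCheck=16.0 K, the deliberately tiny initialSize=16384 this test runs with). A minimal stand-alone sketch of that check follows; the class and method names are invented for illustration and are not the actual ConstantSizeRegionSplitPolicy code:

```java
// Illustrative sketch of the size-based split check logged above
// ("Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K").
// Names are hypothetical; this is not HBase source.
public class SplitCheckSketch {

    /** True when the total size of the store's files exceeds the configured check size. */
    static boolean shouldSplit(long sumSizeBytes, long sizeToCheckBytes) {
        System.out.printf("sumSize=%.1f K, sizeToCheck=%.1f K%n",
                sumSizeBytes / 1024.0, sizeToCheckBytes / 1024.0);
        return sumSizeBytes > sizeToCheckBytes;
    }

    public static void main(String[] args) {
        // 85106 bytes is the logged total of the three selected store files (~83.1 K);
        // 16384 bytes matches the tiny initialSize=16384 the test configures so splits happen fast.
        System.out.println("should split: " + shouldSplit(85_106L, 16_384L));
    }
}
```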
2024-12-05T00:27:51,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741843_1019 (size=20064) 2024-12-05T00:27:51,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741843_1019 (size=20064) 2024-12-05T00:27:51,616 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp/info/36320405a6e544068ff789ce51073ebc 2024-12-05T00:27:51,622 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp/info/36320405a6e544068ff789ce51073ebc as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/36320405a6e544068ff789ce51073ebc 2024-12-05T00:27:51,622 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): aac3f55253ff6dfc9aa5bff573ffec89#info#compaction#61 average throughput is 22.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-05T00:27:51,622 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp/info/f4813b19d3c1461babda1d90a4fc8f74 is 1080, key is row0001/info:/1733358467467/Put/seqid=0 2024-12-05T00:27:51,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741844_1020 (size=75378) 2024-12-05T00:27:51,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741844_1020 (size=75378) 2024-12-05T00:27:51,628 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/36320405a6e544068ff789ce51073ebc, entries=14, sequenceid=99, filesize=19.6 K 2024-12-05T00:27:51,629 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=12.61 KB/12912 for aac3f55253ff6dfc9aa5bff573ffec89 in 24ms, sequenceid=99, compaction requested=false 2024-12-05T00:27:51,629 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for aac3f55253ff6dfc9aa5bff573ffec89: 2024-12-05T00:27:51,629 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.7 K, sizeToCheck=16.0 K 2024-12-05T00:27:51,629 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:27:51,629 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/a2b696823f9840bfb9f58e1aac5e630c because midkey is the same as first or last row 2024-12-05T00:27:51,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45497 {}] regionserver.HRegion(8855): Flush requested on aac3f55253ff6dfc9aa5bff573ffec89 2024-12-05T00:27:51,629 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing aac3f55253ff6dfc9aa5bff573ffec89 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-12-05T00:27:51,633 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp/info/f4813b19d3c1461babda1d90a4fc8f74 as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/f4813b19d3c1461babda1d90a4fc8f74 2024-12-05T00:27:51,634 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp/info/e63154bef1e04d629c303c2678652967 is 1080, key is row0080/info:/1733358471606/Put/seqid=0 2024-12-05T00:27:51,640 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in aac3f55253ff6dfc9aa5bff573ffec89/info of aac3f55253ff6dfc9aa5bff573ffec89 into f4813b19d3c1461babda1d90a4fc8f74(size=73.6 K), total size for store is 93.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
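Each flush and the compaction above use the same two-step layout that is visible in the paths: the new HFile is first written under the region's .tmp directory and then committed by renaming it into the column family directory (.tmp/info/X becomes info/X). A rough local-filesystem sketch of that write-then-commit pattern, with hypothetical directory and method names; the real code path goes through HRegionFileSystem on HDFS:

```java
import java.io.IOException;
import java.nio.file.*;

// Sketch of the ".tmp then commit" pattern seen in the log, on the local filesystem.
// Layout and method names are illustrative, not HBase's actual API.
public class TmpCommitSketch {

    /** Write data to <region>/.tmp/<family>/<file>, then move it to <region>/<family>/<file>. */
    static Path flushAndCommit(Path regionDir, String family, String fileName, byte[] data)
            throws IOException {
        Path tmpFile = regionDir.resolve(".tmp").resolve(family).resolve(fileName);
        Path finalFile = regionDir.resolve(family).resolve(fileName);
        Files.createDirectories(tmpFile.getParent());
        Files.createDirectories(finalFile.getParent());
        Files.write(tmpFile, data);                        // step 1: write the new file out of line
        return Files.move(tmpFile, finalFile,              // step 2: commit by renaming into place
                StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path region = Files.createTempDirectory("aac3f55253ff6dfc9aa5bff573ffec89");
        Path committed = flushAndCommit(region, "info", "39d4a62c196a40a889ea67b6518e575d",
                "fake hfile contents".getBytes());
        System.out.println("committed " + committed);
    }
}
```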
2024-12-05T00:27:51,640 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for aac3f55253ff6dfc9aa5bff573ffec89: 2024-12-05T00:27:51,640 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89., storeName=aac3f55253ff6dfc9aa5bff573ffec89/info, priority=13, startTime=1733358471604; duration=0sec 2024-12-05T00:27:51,640 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K 2024-12-05T00:27:51,640 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:27:51,640 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K 2024-12-05T00:27:51,640 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:27:51,640 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K 2024-12-05T00:27:51,640 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:27:51,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741845_1021 (size=18987) 2024-12-05T00:27:51,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741845_1021 (size=18987) 2024-12-05T00:27:51,641 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp/info/e63154bef1e04d629c303c2678652967 2024-12-05T00:27:51,642 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-05T00:27:51,642 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-05T00:27:51,642 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: aac3f55253ff6dfc9aa5bff573ffec89:info 2024-12-05T00:27:51,643 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38761 {}] assignment.AssignmentManager(1363): Split request from 2113c16e5528,45497,1733358456539, parent={ENCODED => aac3f55253ff6dfc9aa5bff573ffec89, NAME => 'TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-12-05T00:27:51,648 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp/info/e63154bef1e04d629c303c2678652967 as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/e63154bef1e04d629c303c2678652967 2024-12-05T00:27:51,649 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38761 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=2113c16e5528,45497,1733358456539 2024-12-05T00:27:51,653 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/e63154bef1e04d629c303c2678652967, entries=13, sequenceid=115, filesize=18.5 K 2024-12-05T00:27:51,654 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=4.20 KB/4304 for aac3f55253ff6dfc9aa5bff573ffec89 in 25ms, sequenceid=115, compaction requested=true 2024-12-05T00:27:51,655 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for aac3f55253ff6dfc9aa5bff573ffec89: 2024-12-05T00:27:51,655 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=111.7 K, sizeToCheck=16.0 K 2024-12-05T00:27:51,655 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:27:51,655 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=111.7 K, sizeToCheck=16.0 K 2024-12-05T00:27:51,655 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:27:51,655 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=111.7 K, sizeToCheck=16.0 K 2024-12-05T00:27:51,655 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-05T00:27:51,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=1 2024-12-05T00:27:51,655 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38761 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=aac3f55253ff6dfc9aa5bff573ffec89, daughterA=f5a5551bdbe933a1af0f5b8ffbd7feee, daughterB=66ee8e24fad26a57122b2ec4f81ae2cf 2024-12-05T00:27:51,656 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=aac3f55253ff6dfc9aa5bff573ffec89, daughterA=f5a5551bdbe933a1af0f5b8ffbd7feee, daughterB=66ee8e24fad26a57122b2ec4f81ae2cf 2024-12-05T00:27:51,656 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, 
parent=aac3f55253ff6dfc9aa5bff573ffec89, daughterA=f5a5551bdbe933a1af0f5b8ffbd7feee, daughterB=66ee8e24fad26a57122b2ec4f81ae2cf 2024-12-05T00:27:51,656 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=aac3f55253ff6dfc9aa5bff573ffec89, daughterA=f5a5551bdbe933a1af0f5b8ffbd7feee, daughterB=66ee8e24fad26a57122b2ec4f81ae2cf 2024-12-05T00:27:51,657 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38761 {}] assignment.AssignmentManager(1363): Split request from 2113c16e5528,45497,1733358456539, parent={ENCODED => aac3f55253ff6dfc9aa5bff573ffec89, NAME => 'TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-12-05T00:27:51,658 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38761 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=SPLITTING, location=2113c16e5528,45497,1733358456539 2024-12-05T00:27:51,659 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38761 {}] procedure2.ProcedureExecutor(1139): Stored pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=aac3f55253ff6dfc9aa5bff573ffec89, daughterA=4a6169c44fd20841625fef7b3156fe1e, daughterB=5960d802fd4321e57063b4c3bf9770ba 2024-12-05T00:27:51,659 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(842): Waiting on xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=aac3f55253ff6dfc9aa5bff573ffec89, daughterA=4a6169c44fd20841625fef7b3156fe1e, daughterB=5960d802fd4321e57063b4c3bf9770ba held by pid=7 2024-12-05T00:27:51,664 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=aac3f55253ff6dfc9aa5bff573ffec89, UNASSIGN}] 2024-12-05T00:27:51,667 DEBUG [PEWorker-3 {}] assignment.SplitTableRegionProcedure(162): LOCK_EVENT_WAIT SchemaLocking[serverLocks={},namespaceLocks={hbase=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},tableLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},regionLocks={},peerLocks={},metaLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},globalLocks={}] 2024-12-05T00:27:51,667 DEBUG [PEWorker-3 {}] procedure2.ProcedureExecutor(1511): LOCK_EVENT_WAIT pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=aac3f55253ff6dfc9aa5bff573ffec89, daughterA=4a6169c44fd20841625fef7b3156fe1e, daughterB=5960d802fd4321e57063b4c3bf9770ba 2024-12-05T00:27:51,667 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=aac3f55253ff6dfc9aa5bff573ffec89, UNASSIGN 2024-12-05T00:27:51,669 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=aac3f55253ff6dfc9aa5bff573ffec89, regionState=CLOSING, regionLocation=2113c16e5528,45497,1733358456539 2024-12-05T00:27:51,672 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 
{}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=aac3f55253ff6dfc9aa5bff573ffec89, UNASSIGN because future has completed 2024-12-05T00:27:51,672 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-05T00:27:51,673 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure aac3f55253ff6dfc9aa5bff573ffec89, server=2113c16e5528,45497,1733358456539}] 2024-12-05T00:27:51,831 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(122): Close aac3f55253ff6dfc9aa5bff573ffec89 2024-12-05T00:27:51,831 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-05T00:27:51,831 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1722): Closing aac3f55253ff6dfc9aa5bff573ffec89, disabling compactions & flushes 2024-12-05T00:27:51,831 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89. 2024-12-05T00:27:51,831 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89. 2024-12-05T00:27:51,832 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89. after waiting 0 ms 2024-12-05T00:27:51,832 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89. 
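The close sequence above (disable compactions and flushes, a time-limited wait for the close lock, then "Updates disabled for region") amounts to taking a writer-priority lock with a timeout so in-flight updates can drain before the final flush. A small self-contained sketch of that pattern using java.util.concurrent; the names, the fail-fast check, and the timeout are assumptions for illustration, not HRegion's actual locking code:

```java
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Sketch of a "time limited wait for close lock" as logged above.
// Readers model in-flight updates; the closer takes the write lock before disabling updates.
public class CloseLockSketch {
    private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
    private volatile boolean writesDisabled = false;

    /** Called by update handlers; fails fast once the region is closing. */
    void update(Runnable work) {
        if (writesDisabled) throw new IllegalStateException("region is closing");
        closeLock.readLock().lock();
        try {
            work.run();
        } finally {
            closeLock.readLock().unlock();
        }
    }

    /** Called once by the close handler; waits up to the given time for updates to drain. */
    boolean close(long timeout, TimeUnit unit) throws InterruptedException {
        if (!closeLock.writeLock().tryLock(timeout, unit)) {
            return false;                      // could not acquire the close lock in time
        }
        try {
            writesDisabled = true;             // "Updates disabled for region ..."
            // the final memstore flush would happen here
            return true;
        } finally {
            closeLock.writeLock().unlock();
        }
    }

    public static void main(String[] args) throws InterruptedException {
        CloseLockSketch region = new CloseLockSketch();
        region.update(() -> System.out.println("put applied"));
        System.out.println("closed: " + region.close(60, TimeUnit.SECONDS));
    }
}
```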
2024-12-05T00:27:51,832 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(2902): Flushing aac3f55253ff6dfc9aa5bff573ffec89 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-05T00:27:51,836 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp/info/b103bb3aa9654ef78cae7f2cfed4db0b is 1080, key is row0093/info:/1733358471630/Put/seqid=0 2024-12-05T00:27:51,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741846_1022 (size=9270) 2024-12-05T00:27:51,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741846_1022 (size=9270) 2024-12-05T00:27:51,842 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp/info/b103bb3aa9654ef78cae7f2cfed4db0b 2024-12-05T00:27:51,847 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/.tmp/info/b103bb3aa9654ef78cae7f2cfed4db0b as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/b103bb3aa9654ef78cae7f2cfed4db0b 2024-12-05T00:27:51,851 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/b103bb3aa9654ef78cae7f2cfed4db0b, entries=4, sequenceid=123, filesize=9.1 K 2024-12-05T00:27:51,853 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for aac3f55253ff6dfc9aa5bff573ffec89 in 20ms, sequenceid=123, compaction requested=true 2024-12-05T00:27:51,854 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/7e30f5b4090f42febe7fee906e320fad, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/96ea172a18094be6b34229b2f4b86d59, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/a2b696823f9840bfb9f58e1aac5e630c, 
hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/3cd9ef399f944f2887857e37ce16522e, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/f5cd93a32b054b8aa9e8d3b3e5473b1b, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/39d4a62c196a40a889ea67b6518e575d] to archive 2024-12-05T00:27:51,854 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-05T00:27:51,856 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/7e30f5b4090f42febe7fee906e320fad to hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/archive/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/7e30f5b4090f42febe7fee906e320fad 2024-12-05T00:27:51,857 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/96ea172a18094be6b34229b2f4b86d59 to hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/archive/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/96ea172a18094be6b34229b2f4b86d59 2024-12-05T00:27:51,858 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/a2b696823f9840bfb9f58e1aac5e630c to hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/archive/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/a2b696823f9840bfb9f58e1aac5e630c 2024-12-05T00:27:51,859 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/3cd9ef399f944f2887857e37ce16522e to hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/archive/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/3cd9ef399f944f2887857e37ce16522e 2024-12-05T00:27:51,861 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/f5cd93a32b054b8aa9e8d3b3e5473b1b to 
hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/archive/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/f5cd93a32b054b8aa9e8d3b3e5473b1b 2024-12-05T00:27:51,862 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/39d4a62c196a40a889ea67b6518e575d to hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/archive/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/39d4a62c196a40a889ea67b6518e575d 2024-12-05T00:27:51,868 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=1 2024-12-05T00:27:51,869 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89. 2024-12-05T00:27:51,869 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1676): Region close journal for aac3f55253ff6dfc9aa5bff573ffec89: Waiting for close lock at 1733358471831Running coprocessor pre-close hooks at 1733358471831Disabling compacts and flushes for region at 1733358471831Disabling writes for close at 1733358471832 (+1 ms)Obtaining lock to block concurrent updates at 1733358471832Preparing flush snapshotting stores in aac3f55253ff6dfc9aa5bff573ffec89 at 1733358471832Finished memstore snapshotting TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89., syncing WAL and waiting on mvcc, flushsize=dataSize=4304, getHeapSize=4848, getOffHeapSize=0, getCellsCount=4 at 1733358471832Flushing stores of TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89. 
at 1733358471833 (+1 ms)Flushing aac3f55253ff6dfc9aa5bff573ffec89/info: creating writer at 1733358471833Flushing aac3f55253ff6dfc9aa5bff573ffec89/info: appending metadata at 1733358471836 (+3 ms)Flushing aac3f55253ff6dfc9aa5bff573ffec89/info: closing flushed file at 1733358471836Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@32d2d11d: reopening flushed file at 1733358471846 (+10 ms)Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for aac3f55253ff6dfc9aa5bff573ffec89 in 20ms, sequenceid=123, compaction requested=true at 1733358471853 (+7 ms)Writing region close event to WAL at 1733358471865 (+12 ms)Running coprocessor post-close hooks at 1733358471869 (+4 ms)Closed at 1733358471869 2024-12-05T00:27:51,871 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(157): Closed aac3f55253ff6dfc9aa5bff573ffec89 2024-12-05T00:27:51,871 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=aac3f55253ff6dfc9aa5bff573ffec89, regionState=CLOSED 2024-12-05T00:27:51,874 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure aac3f55253ff6dfc9aa5bff573ffec89, server=2113c16e5528,45497,1733358456539 because future has completed 2024-12-05T00:27:51,876 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-12-05T00:27:51,876 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; CloseRegionProcedure aac3f55253ff6dfc9aa5bff573ffec89, server=2113c16e5528,45497,1733358456539 in 202 msec 2024-12-05T00:27:51,879 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-12-05T00:27:51,879 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=aac3f55253ff6dfc9aa5bff573ffec89, UNASSIGN in 212 msec 2024-12-05T00:27:51,886 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:27:51,889 INFO [PEWorker-5 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 4 storefiles, region=aac3f55253ff6dfc9aa5bff573ffec89, threads=4 2024-12-05T00:27:51,891 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/b103bb3aa9654ef78cae7f2cfed4db0b for region: aac3f55253ff6dfc9aa5bff573ffec89 2024-12-05T00:27:51,891 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/e63154bef1e04d629c303c2678652967 for region: aac3f55253ff6dfc9aa5bff573ffec89 2024-12-05T00:27:51,891 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/36320405a6e544068ff789ce51073ebc for region: aac3f55253ff6dfc9aa5bff573ffec89 2024-12-05T00:27:51,891 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/f4813b19d3c1461babda1d90a4fc8f74 for region: aac3f55253ff6dfc9aa5bff573ffec89 2024-12-05T00:27:51,901 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/b103bb3aa9654ef78cae7f2cfed4db0b, top=true 2024-12-05T00:27:51,902 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/36320405a6e544068ff789ce51073ebc, top=true 2024-12-05T00:27:51,902 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/e63154bef1e04d629c303c2678652967, top=true 2024-12-05T00:27:51,909 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/TestLogRolling-testLogRolling=aac3f55253ff6dfc9aa5bff573ffec89-b103bb3aa9654ef78cae7f2cfed4db0b for child: 66ee8e24fad26a57122b2ec4f81ae2cf, parent: aac3f55253ff6dfc9aa5bff573ffec89 2024-12-05T00:27:51,909 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/TestLogRolling-testLogRolling=aac3f55253ff6dfc9aa5bff573ffec89-36320405a6e544068ff789ce51073ebc for child: 66ee8e24fad26a57122b2ec4f81ae2cf, parent: aac3f55253ff6dfc9aa5bff573ffec89 2024-12-05T00:27:51,909 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/b103bb3aa9654ef78cae7f2cfed4db0b for region: aac3f55253ff6dfc9aa5bff573ffec89 2024-12-05T00:27:51,909 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/36320405a6e544068ff789ce51073ebc for region: aac3f55253ff6dfc9aa5bff573ffec89 2024-12-05T00:27:51,913 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created 
linkFile:hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/TestLogRolling-testLogRolling=aac3f55253ff6dfc9aa5bff573ffec89-e63154bef1e04d629c303c2678652967 for child: 66ee8e24fad26a57122b2ec4f81ae2cf, parent: aac3f55253ff6dfc9aa5bff573ffec89 2024-12-05T00:27:51,913 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/e63154bef1e04d629c303c2678652967 for region: aac3f55253ff6dfc9aa5bff573ffec89 2024-12-05T00:27:51,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741847_1023 (size=27) 2024-12-05T00:27:51,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741847_1023 (size=27) 2024-12-05T00:27:51,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741848_1024 (size=27) 2024-12-05T00:27:51,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741848_1024 (size=27) 2024-12-05T00:27:51,935 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/f4813b19d3c1461babda1d90a4fc8f74 for region: aac3f55253ff6dfc9aa5bff573ffec89 2024-12-05T00:27:51,938 DEBUG [PEWorker-5 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region aac3f55253ff6dfc9aa5bff573ffec89 Daughter A: [hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/f5a5551bdbe933a1af0f5b8ffbd7feee/info/f4813b19d3c1461babda1d90a4fc8f74.aac3f55253ff6dfc9aa5bff573ffec89] storefiles, Daughter B: [hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/TestLogRolling-testLogRolling=aac3f55253ff6dfc9aa5bff573ffec89-36320405a6e544068ff789ce51073ebc, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/TestLogRolling-testLogRolling=aac3f55253ff6dfc9aa5bff573ffec89-b103bb3aa9654ef78cae7f2cfed4db0b, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/TestLogRolling-testLogRolling=aac3f55253ff6dfc9aa5bff573ffec89-e63154bef1e04d629c303c2678652967, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/f4813b19d3c1461babda1d90a4fc8f74.aac3f55253ff6dfc9aa5bff573ffec89] storefiles. 
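The daughter store file names listed above follow two conventions that can be read straight off the paths: a reference to half of a parent HFile is named <hfile>.<parentEncodedRegion> (daughter A's bottom half, plus one top half for daughter B), while an HFileLink created for a file that sorts entirely above the split key row0062 is named <table>=<parentEncodedRegion>-<hfile>. The sketch below just reproduces those names for the files in this split; it is plain string assembly and does not create real HBase reference or link files:

```java
import java.util.List;

// Reproduces the daughter store file names seen in the split above.
// Pure string formatting; real reference/link files also carry metadata.
public class SplitFileNameSketch {
    static String referenceName(String hfile, String parentEncodedRegion) {
        return hfile + "." + parentEncodedRegion;                // e.g. f4813b19....aac3f552...
    }

    static String hfileLinkName(String table, String parentEncodedRegion, String hfile) {
        return table + "=" + parentEncodedRegion + "-" + hfile;  // e.g. TestLogRolling-testLogRolling=aac3f552...-3632...
    }

    public static void main(String[] args) {
        String parent = "aac3f55253ff6dfc9aa5bff573ffec89";
        String table = "TestLogRolling-testLogRolling";
        // Daughter A keeps a (bottom) reference to the big compacted file.
        System.out.println(referenceName("f4813b19d3c1461babda1d90a4fc8f74", parent));
        // Daughter B gets links to the files whose keys all lie above the split key row0062.
        for (String hfile : List.of("36320405a6e544068ff789ce51073ebc",
                                    "b103bb3aa9654ef78cae7f2cfed4db0b",
                                    "e63154bef1e04d629c303c2678652967")) {
            System.out.println(hfileLinkName(table, parent, hfile));
        }
    }
}
```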
2024-12-05T00:27:51,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741849_1025 (size=71) 2024-12-05T00:27:51,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741849_1025 (size=71) 2024-12-05T00:27:51,948 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:27:51,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741850_1026 (size=71) 2024-12-05T00:27:51,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741850_1026 (size=71) 2024-12-05T00:27:51,962 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:27:51,973 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/f5a5551bdbe933a1af0f5b8ffbd7feee/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-12-05T00:27:51,975 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-12-05T00:27:51,978 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733358471977"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1733358471977"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1733358471977"}]},"ts":"1733358471977"} 2024-12-05T00:27:51,978 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733358471649.f5a5551bdbe933a1af0f5b8ffbd7feee.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733358471977"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733358471977"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733358471977"}]},"ts":"1733358471977"} 2024-12-05T00:27:51,978 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733358471977"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733358471977"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733358471977"}]},"ts":"1733358471977"} 2024-12-05T00:27:51,996 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f5a5551bdbe933a1af0f5b8ffbd7feee, ASSIGN}, {pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=66ee8e24fad26a57122b2ec4f81ae2cf, ASSIGN}] 2024-12-05T00:27:51,997 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=66ee8e24fad26a57122b2ec4f81ae2cf, ASSIGN 2024-12-05T00:27:51,997 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f5a5551bdbe933a1af0f5b8ffbd7feee, ASSIGN 2024-12-05T00:27:51,998 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=66ee8e24fad26a57122b2ec4f81ae2cf, ASSIGN; state=SPLITTING_NEW, location=2113c16e5528,45497,1733358456539; forceNewPlan=false, retain=false 2024-12-05T00:27:51,998 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f5a5551bdbe933a1af0f5b8ffbd7feee, ASSIGN; state=SPLITTING_NEW, location=2113c16e5528,45497,1733358456539; forceNewPlan=false, retain=false 2024-12-05T00:27:52,148 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=f5a5551bdbe933a1af0f5b8ffbd7feee, regionState=OPENING, regionLocation=2113c16e5528,45497,1733358456539 2024-12-05T00:27:52,149 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=66ee8e24fad26a57122b2ec4f81ae2cf, regionState=OPENING, regionLocation=2113c16e5528,45497,1733358456539 2024-12-05T00:27:52,151 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=66ee8e24fad26a57122b2ec4f81ae2cf, ASSIGN because future has completed 2024-12-05T00:27:52,151 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure 66ee8e24fad26a57122b2ec4f81ae2cf, server=2113c16e5528,45497,1733358456539}] 2024-12-05T00:27:52,152 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f5a5551bdbe933a1af0f5b8ffbd7feee, ASSIGN because future has completed 2024-12-05T00:27:52,152 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure f5a5551bdbe933a1af0f5b8ffbd7feee, server=2113c16e5528,45497,1733358456539}] 2024-12-05T00:27:52,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-05T00:27:52,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-05T00:27:52,307 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733358471649.f5a5551bdbe933a1af0f5b8ffbd7feee.
2024-12-05T00:27:52,307 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7752): Opening region: {ENCODED => f5a5551bdbe933a1af0f5b8ffbd7feee, NAME => 'TestLogRolling-testLogRolling,,1733358471649.f5a5551bdbe933a1af0f5b8ffbd7feee.', STARTKEY => '', ENDKEY => 'row0062'}
2024-12-05T00:27:52,307 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling f5a5551bdbe933a1af0f5b8ffbd7feee
2024-12-05T00:27:52,307 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733358471649.f5a5551bdbe933a1af0f5b8ffbd7feee.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-05T00:27:52,308 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7794): checking encryption for f5a5551bdbe933a1af0f5b8ffbd7feee
2024-12-05T00:27:52,308 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7797): checking classloading for f5a5551bdbe933a1af0f5b8ffbd7feee
2024-12-05T00:27:52,309 INFO [StoreOpener-f5a5551bdbe933a1af0f5b8ffbd7feee-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region f5a5551bdbe933a1af0f5b8ffbd7feee
2024-12-05T00:27:52,310 INFO [StoreOpener-f5a5551bdbe933a1af0f5b8ffbd7feee-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f5a5551bdbe933a1af0f5b8ffbd7feee columnFamilyName info
2024-12-05T00:27:52,310 DEBUG [StoreOpener-f5a5551bdbe933a1af0f5b8ffbd7feee-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:27:52,320 DEBUG [StoreOpener-f5a5551bdbe933a1af0f5b8ffbd7feee-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/f5a5551bdbe933a1af0f5b8ffbd7feee/info/f4813b19d3c1461babda1d90a4fc8f74.aac3f55253ff6dfc9aa5bff573ffec89->hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/f4813b19d3c1461babda1d90a4fc8f74-bottom 2024-12-05T00:27:52,321 INFO [StoreOpener-f5a5551bdbe933a1af0f5b8ffbd7feee-1 {}] regionserver.HStore(327): Store=f5a5551bdbe933a1af0f5b8ffbd7feee/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:27:52,321 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1038): replaying wal for f5a5551bdbe933a1af0f5b8ffbd7feee 2024-12-05T00:27:52,322 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/f5a5551bdbe933a1af0f5b8ffbd7feee 2024-12-05T00:27:52,323 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/f5a5551bdbe933a1af0f5b8ffbd7feee 2024-12-05T00:27:52,323 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1048): stopping wal replay for f5a5551bdbe933a1af0f5b8ffbd7feee 2024-12-05T00:27:52,323 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1060): Cleaning up temporary data for f5a5551bdbe933a1af0f5b8ffbd7feee 2024-12-05T00:27:52,325 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1093): writing seq id for f5a5551bdbe933a1af0f5b8ffbd7feee 2024-12-05T00:27:52,325 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1114): Opened f5a5551bdbe933a1af0f5b8ffbd7feee; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=742750, jitterRate=-0.05554533004760742}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T00:27:52,326 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f5a5551bdbe933a1af0f5b8ffbd7feee 2024-12-05T00:27:52,326 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1006): Region open journal for f5a5551bdbe933a1af0f5b8ffbd7feee: Running coprocessor pre-open hook at 1733358472308Writing region info on filesystem at 1733358472308Initializing all the Stores at 1733358472308Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358472308Cleaning up temporary data from old regions at 1733358472323 (+15 ms)Running coprocessor post-open hooks at 1733358472326 (+3 ms)Region opened successfully at 1733358472326 2024-12-05T00:27:52,327 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733358471649.f5a5551bdbe933a1af0f5b8ffbd7feee., pid=14, masterSystemTime=1733358472303 2024-12-05T00:27:52,327 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.CompactSplit(403): Add compact mark for store f5a5551bdbe933a1af0f5b8ffbd7feee:info, priority=-2147483648, current under compaction store size is 1 2024-12-05T00:27:52,327 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-05T00:27:52,327 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-05T00:27:52,328 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1733358471649.f5a5551bdbe933a1af0f5b8ffbd7feee. 2024-12-05T00:27:52,328 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HStore(1541): f5a5551bdbe933a1af0f5b8ffbd7feee/info is initiating minor compaction (all files) 2024-12-05T00:27:52,328 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f5a5551bdbe933a1af0f5b8ffbd7feee/info in TestLogRolling-testLogRolling,,1733358471649.f5a5551bdbe933a1af0f5b8ffbd7feee. 2024-12-05T00:27:52,328 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/f5a5551bdbe933a1af0f5b8ffbd7feee/info/f4813b19d3c1461babda1d90a4fc8f74.aac3f55253ff6dfc9aa5bff573ffec89->hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/f4813b19d3c1461babda1d90a4fc8f74-bottom] into tmpdir=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/f5a5551bdbe933a1af0f5b8ffbd7feee/.tmp, totalSize=73.6 K 2024-12-05T00:27:52,329 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.Compactor(225): Compacting f4813b19d3c1461babda1d90a4fc8f74.aac3f55253ff6dfc9aa5bff573ffec89, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733358467467 2024-12-05T00:27:52,329 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733358471649.f5a5551bdbe933a1af0f5b8ffbd7feee. 
2024-12-05T00:27:52,329 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733358471649.f5a5551bdbe933a1af0f5b8ffbd7feee. 2024-12-05T00:27:52,330 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf. 2024-12-05T00:27:52,330 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 66ee8e24fad26a57122b2ec4f81ae2cf, NAME => 'TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.', STARTKEY => 'row0062', ENDKEY => ''} 2024-12-05T00:27:52,330 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 66ee8e24fad26a57122b2ec4f81ae2cf 2024-12-05T00:27:52,330 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:27:52,330 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 66ee8e24fad26a57122b2ec4f81ae2cf 2024-12-05T00:27:52,330 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 66ee8e24fad26a57122b2ec4f81ae2cf 2024-12-05T00:27:52,330 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=f5a5551bdbe933a1af0f5b8ffbd7feee, regionState=OPEN, openSeqNum=127, regionLocation=2113c16e5528,45497,1733358456539 2024-12-05T00:27:52,331 INFO [StoreOpener-66ee8e24fad26a57122b2ec4f81ae2cf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 66ee8e24fad26a57122b2ec4f81ae2cf 2024-12-05T00:27:52,332 INFO [StoreOpener-66ee8e24fad26a57122b2ec4f81ae2cf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 66ee8e24fad26a57122b2ec4f81ae2cf columnFamilyName info 2024-12-05T00:27:52,332 DEBUG [StoreOpener-66ee8e24fad26a57122b2ec4f81ae2cf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:27:52,332 DEBUG 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45497 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-12-05T00:27:52,332 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 2024-12-05T00:27:52,333 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-12-05T00:27:52,333 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=14, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure f5a5551bdbe933a1af0f5b8ffbd7feee, server=2113c16e5528,45497,1733358456539 because future has completed 2024-12-05T00:27:52,337 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=11 2024-12-05T00:27:52,337 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure f5a5551bdbe933a1af0f5b8ffbd7feee, server=2113c16e5528,45497,1733358456539 in 182 msec 2024-12-05T00:27:52,339 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f5a5551bdbe933a1af0f5b8ffbd7feee, ASSIGN in 342 msec 2024-12-05T00:27:52,342 DEBUG [StoreOpener-66ee8e24fad26a57122b2ec4f81ae2cf-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/TestLogRolling-testLogRolling=aac3f55253ff6dfc9aa5bff573ffec89-36320405a6e544068ff789ce51073ebc 2024-12-05T00:27:52,346 DEBUG [StoreOpener-66ee8e24fad26a57122b2ec4f81ae2cf-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/TestLogRolling-testLogRolling=aac3f55253ff6dfc9aa5bff573ffec89-b103bb3aa9654ef78cae7f2cfed4db0b 2024-12-05T00:27:52,350 DEBUG [StoreOpener-66ee8e24fad26a57122b2ec4f81ae2cf-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/TestLogRolling-testLogRolling=aac3f55253ff6dfc9aa5bff573ffec89-e63154bef1e04d629c303c2678652967 2024-12-05T00:27:52,350 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/hbase/meta/1588230740/.tmp/info/7bab82d9a0a84f68a19b5130191d7995 is 193, key is TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf./info:regioninfo/1733358472148/Put/seqid=0 2024-12-05T00:27:52,352 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f5a5551bdbe933a1af0f5b8ffbd7feee#info#compaction#64 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-05T00:27:52,353 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/f5a5551bdbe933a1af0f5b8ffbd7feee/.tmp/info/ee0265f163ca434aa2e3a1a799d2b0cd is 1080, key is row0001/info:/1733358467467/Put/seqid=0 2024-12-05T00:27:52,355 DEBUG [StoreOpener-66ee8e24fad26a57122b2ec4f81ae2cf-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/f4813b19d3c1461babda1d90a4fc8f74.aac3f55253ff6dfc9aa5bff573ffec89->hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/f4813b19d3c1461babda1d90a4fc8f74-top 2024-12-05T00:27:52,355 INFO [StoreOpener-66ee8e24fad26a57122b2ec4f81ae2cf-1 {}] regionserver.HStore(327): Store=66ee8e24fad26a57122b2ec4f81ae2cf/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:27:52,355 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 66ee8e24fad26a57122b2ec4f81ae2cf 2024-12-05T00:27:52,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741851_1027 (size=9847) 2024-12-05T00:27:52,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741851_1027 (size=9847) 2024-12-05T00:27:52,356 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf 2024-12-05T00:27:52,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741852_1028 (size=70862) 2024-12-05T00:27:52,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741852_1028 (size=70862) 2024-12-05T00:27:52,357 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/hbase/meta/1588230740/.tmp/info/7bab82d9a0a84f68a19b5130191d7995 2024-12-05T00:27:52,358 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf 2024-12-05T00:27:52,358 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 66ee8e24fad26a57122b2ec4f81ae2cf 2024-12-05T00:27:52,358 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up 
temporary data for 66ee8e24fad26a57122b2ec4f81ae2cf 2024-12-05T00:27:52,360 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 66ee8e24fad26a57122b2ec4f81ae2cf 2024-12-05T00:27:52,361 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 66ee8e24fad26a57122b2ec4f81ae2cf; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=806567, jitterRate=0.025603994727134705}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T00:27:52,361 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 66ee8e24fad26a57122b2ec4f81ae2cf 2024-12-05T00:27:52,361 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 66ee8e24fad26a57122b2ec4f81ae2cf: Running coprocessor pre-open hook at 1733358472330Writing region info on filesystem at 1733358472330Initializing all the Stores at 1733358472331 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358472331Cleaning up temporary data from old regions at 1733358472358 (+27 ms)Running coprocessor post-open hooks at 1733358472361 (+3 ms)Region opened successfully at 1733358472361 2024-12-05T00:27:52,362 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf., pid=13, masterSystemTime=1733358472303 2024-12-05T00:27:52,363 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 66ee8e24fad26a57122b2ec4f81ae2cf:info, priority=-2147483648, current under compaction store size is 2 2024-12-05T00:27:52,363 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-05T00:27:52,363 DEBUG [RS:0;2113c16e5528:45497-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-05T00:27:52,363 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/f5a5551bdbe933a1af0f5b8ffbd7feee/.tmp/info/ee0265f163ca434aa2e3a1a799d2b0cd as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/f5a5551bdbe933a1af0f5b8ffbd7feee/info/ee0265f163ca434aa2e3a1a799d2b0cd 2024-12-05T00:27:52,365 INFO [RS:0;2113c16e5528:45497-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it 
belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf. 2024-12-05T00:27:52,365 DEBUG [RS:0;2113c16e5528:45497-longCompactions-0 {}] regionserver.HStore(1541): 66ee8e24fad26a57122b2ec4f81ae2cf/info is initiating minor compaction (all files) 2024-12-05T00:27:52,365 INFO [RS:0;2113c16e5528:45497-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 66ee8e24fad26a57122b2ec4f81ae2cf/info in TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf. 2024-12-05T00:27:52,365 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf. 2024-12-05T00:27:52,365 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf. 2024-12-05T00:27:52,365 INFO [RS:0;2113c16e5528:45497-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/f4813b19d3c1461babda1d90a4fc8f74.aac3f55253ff6dfc9aa5bff573ffec89->hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/f4813b19d3c1461babda1d90a4fc8f74-top, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/TestLogRolling-testLogRolling=aac3f55253ff6dfc9aa5bff573ffec89-36320405a6e544068ff789ce51073ebc, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/TestLogRolling-testLogRolling=aac3f55253ff6dfc9aa5bff573ffec89-e63154bef1e04d629c303c2678652967, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/TestLogRolling-testLogRolling=aac3f55253ff6dfc9aa5bff573ffec89-b103bb3aa9654ef78cae7f2cfed4db0b] into tmpdir=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp, totalSize=120.8 K 2024-12-05T00:27:52,366 DEBUG [RS:0;2113c16e5528:45497-longCompactions-0 {}] compactions.Compactor(225): Compacting f4813b19d3c1461babda1d90a4fc8f74.aac3f55253ff6dfc9aa5bff573ffec89, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1733358467467 2024-12-05T00:27:52,366 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=66ee8e24fad26a57122b2ec4f81ae2cf, regionState=OPEN, openSeqNum=127, regionLocation=2113c16e5528,45497,1733358456539 2024-12-05T00:27:52,367 DEBUG [RS:0;2113c16e5528:45497-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=aac3f55253ff6dfc9aa5bff573ffec89-36320405a6e544068ff789ce51073ebc, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1733358471583 2024-12-05T00:27:52,367 DEBUG [RS:0;2113c16e5528:45497-longCompactions-0 {}] 
compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=aac3f55253ff6dfc9aa5bff573ffec89-e63154bef1e04d629c303c2678652967, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733358471606 2024-12-05T00:27:52,368 DEBUG [RS:0;2113c16e5528:45497-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=aac3f55253ff6dfc9aa5bff573ffec89-b103bb3aa9654ef78cae7f2cfed4db0b, keycount=4, bloomtype=ROW, size=9.1 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1733358471630 2024-12-05T00:27:52,368 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure 66ee8e24fad26a57122b2ec4f81ae2cf, server=2113c16e5528,45497,1733358456539 because future has completed 2024-12-05T00:27:52,371 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in f5a5551bdbe933a1af0f5b8ffbd7feee/info of f5a5551bdbe933a1af0f5b8ffbd7feee into ee0265f163ca434aa2e3a1a799d2b0cd(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-05T00:27:52,371 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f5a5551bdbe933a1af0f5b8ffbd7feee: 2024-12-05T00:27:52,371 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733358471649.f5a5551bdbe933a1af0f5b8ffbd7feee., storeName=f5a5551bdbe933a1af0f5b8ffbd7feee/info, priority=15, startTime=1733358472327; duration=0sec 2024-12-05T00:27:52,371 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-05T00:27:52,371 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f5a5551bdbe933a1af0f5b8ffbd7feee:info 2024-12-05T00:27:52,372 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=12 2024-12-05T00:27:52,372 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; OpenRegionProcedure 66ee8e24fad26a57122b2ec4f81ae2cf, server=2113c16e5528,45497,1733358456539 in 218 msec 2024-12-05T00:27:52,375 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=7 2024-12-05T00:27:52,375 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=66ee8e24fad26a57122b2ec4f81ae2cf, ASSIGN in 376 msec 2024-12-05T00:27:52,377 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=aac3f55253ff6dfc9aa5bff573ffec89, daughterA=f5a5551bdbe933a1af0f5b8ffbd7feee, daughterB=66ee8e24fad26a57122b2ec4f81ae2cf in 725 msec 2024-12-05T00:27:52,377 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=aac3f55253ff6dfc9aa5bff573ffec89, 
daughterA=4a6169c44fd20841625fef7b3156fe1e, daughterB=5960d802fd4321e57063b4c3bf9770ba 2024-12-05T00:27:52,377 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=aac3f55253ff6dfc9aa5bff573ffec89, daughterA=4a6169c44fd20841625fef7b3156fe1e, daughterB=5960d802fd4321e57063b4c3bf9770ba 2024-12-05T00:27:52,377 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=aac3f55253ff6dfc9aa5bff573ffec89, daughterA=4a6169c44fd20841625fef7b3156fe1e, daughterB=5960d802fd4321e57063b4c3bf9770ba 2024-12-05T00:27:52,378 INFO [PEWorker-1 {}] assignment.SplitTableRegionProcedure(534): Split of {ENCODED => aac3f55253ff6dfc9aa5bff573ffec89, NAME => 'TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89.', STARTKEY => '', ENDKEY => ''} skipped; state is already SPLIT 2024-12-05T00:27:52,380 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=aac3f55253ff6dfc9aa5bff573ffec89, daughterA=4a6169c44fd20841625fef7b3156fe1e, daughterB=5960d802fd4321e57063b4c3bf9770ba in 720 msec 2024-12-05T00:27:52,381 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/hbase/meta/1588230740/.tmp/ns/e128b309475d49b389a846e87f88f674 is 43, key is default/ns:d/1733358457334/Put/seqid=0 2024-12-05T00:27:52,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741853_1029 (size=5153) 2024-12-05T00:27:52,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741853_1029 (size=5153) 2024-12-05T00:27:52,390 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/hbase/meta/1588230740/.tmp/ns/e128b309475d49b389a846e87f88f674 2024-12-05T00:27:52,399 INFO [RS:0;2113c16e5528:45497-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66ee8e24fad26a57122b2ec4f81ae2cf#info#compaction#67 average throughput is 17.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-05T00:27:52,399 DEBUG [RS:0;2113c16e5528:45497-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/c7598cd2c2174aaa942fb5b095e617a6 is 1080, key is row0062/info:/1733358469574/Put/seqid=0 2024-12-05T00:27:52,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741854_1030 (size=43081) 2024-12-05T00:27:52,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741854_1030 (size=43081) 2024-12-05T00:27:52,408 DEBUG [RS:0;2113c16e5528:45497-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/c7598cd2c2174aaa942fb5b095e617a6 as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/c7598cd2c2174aaa942fb5b095e617a6 2024-12-05T00:27:52,410 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/hbase/meta/1588230740/.tmp/table/577a9a18a31b401aa0834c12b201970a is 65, key is TestLogRolling-testLogRolling/table:state/1733358457725/Put/seqid=0 2024-12-05T00:27:52,414 INFO [RS:0;2113c16e5528:45497-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in 66ee8e24fad26a57122b2ec4f81ae2cf/info of 66ee8e24fad26a57122b2ec4f81ae2cf into c7598cd2c2174aaa942fb5b095e617a6(size=42.1 K), total size for store is 42.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-05T00:27:52,414 DEBUG [RS:0;2113c16e5528:45497-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 66ee8e24fad26a57122b2ec4f81ae2cf: 2024-12-05T00:27:52,414 INFO [RS:0;2113c16e5528:45497-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf., storeName=66ee8e24fad26a57122b2ec4f81ae2cf/info, priority=12, startTime=1733358472362; duration=0sec 2024-12-05T00:27:52,414 DEBUG [RS:0;2113c16e5528:45497-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-05T00:27:52,414 DEBUG [RS:0;2113c16e5528:45497-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66ee8e24fad26a57122b2ec4f81ae2cf:info 2024-12-05T00:27:52,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741855_1031 (size=5340) 2024-12-05T00:27:52,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741855_1031 (size=5340) 2024-12-05T00:27:52,416 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/hbase/meta/1588230740/.tmp/table/577a9a18a31b401aa0834c12b201970a 2024-12-05T00:27:52,421 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/hbase/meta/1588230740/.tmp/info/7bab82d9a0a84f68a19b5130191d7995 as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/hbase/meta/1588230740/info/7bab82d9a0a84f68a19b5130191d7995 2024-12-05T00:27:52,425 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/hbase/meta/1588230740/info/7bab82d9a0a84f68a19b5130191d7995, entries=30, sequenceid=17, filesize=9.6 K 2024-12-05T00:27:52,426 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/hbase/meta/1588230740/.tmp/ns/e128b309475d49b389a846e87f88f674 as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/hbase/meta/1588230740/ns/e128b309475d49b389a846e87f88f674 2024-12-05T00:27:52,431 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/hbase/meta/1588230740/ns/e128b309475d49b389a846e87f88f674, entries=2, sequenceid=17, filesize=5.0 K 2024-12-05T00:27:52,431 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/hbase/meta/1588230740/.tmp/table/577a9a18a31b401aa0834c12b201970a as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/hbase/meta/1588230740/table/577a9a18a31b401aa0834c12b201970a 2024-12-05T00:27:52,436 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/hbase/meta/1588230740/table/577a9a18a31b401aa0834c12b201970a, entries=2, sequenceid=17, filesize=5.2 K 2024-12-05T00:27:52,437 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 104ms, sequenceid=17, compaction requested=false 2024-12-05T00:27:52,437 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-05T00:27:53,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:53,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:53,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45497 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:58192 deadline: 1733358483639, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89. is not online on 2113c16e5528,45497,1733358456539 2024-12-05T00:27:53,665 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89., hostname=2113c16e5528,45497,1733358456539, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89., hostname=2113c16e5528,45497,1733358456539, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89. 
is not online on 2113c16e5528,45497,1733358456539 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T00:27:53,666 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89., hostname=2113c16e5528,45497,1733358456539, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89. is not online on 2113c16e5528,45497,1733358456539 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T00:27:53,666 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1733358457366.aac3f55253ff6dfc9aa5bff573ffec89., hostname=2113c16e5528,45497,1733358456539, seqNum=2 from cache 2024-12-05T00:27:54,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:54,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-05T00:27:55,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:55,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:56,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-05T00:27:56,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:57,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:57,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-05T00:27:57,371 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-05T00:27:57,372 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-05T00:27:57,372 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-05T00:27:57,372 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-05T00:27:57,372 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-05T00:27:57,372 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-05T00:27:57,372 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-05T00:27:57,373 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-05T00:27:57,373 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-05T00:27:57,395 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-05T00:27:57,396 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-05T00:27:57,396 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-05T00:27:57,397 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-05T00:27:57,397 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-05T00:27:57,397 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-05T00:27:57,400 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-05T00:27:57,400 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-05T00:27:57,401 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:57,403 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:27:58,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:58,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:59,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:27:59,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:00,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:00,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:01,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:01,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:02,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:02,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:03,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:03,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-12-05T00:28:03,718 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf., hostname=2113c16e5528,45497,1733358456539, seqNum=127]
2024-12-05T00:28:03,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45497 {}] regionserver.HRegion(8855): Flush requested on 66ee8e24fad26a57122b2ec4f81ae2cf
2024-12-05T00:28:03,730 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 66ee8e24fad26a57122b2ec4f81ae2cf 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-05T00:28:03,735 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/9dbe99aa82054f8ca676808bb8bfd3cc is 1080, key is row0097/info:/1733358483719/Put/seqid=0
2024-12-05T00:28:03,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741856_1032 (size=12516)
2024-12-05T00:28:03,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741856_1032 (size=12516)
2024-12-05T00:28:03,740 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/9dbe99aa82054f8ca676808bb8bfd3cc
2024-12-05T00:28:03,746 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/9dbe99aa82054f8ca676808bb8bfd3cc as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/9dbe99aa82054f8ca676808bb8bfd3cc
2024-12-05T00:28:03,752 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/9dbe99aa82054f8ca676808bb8bfd3cc, entries=7, sequenceid=137, filesize=12.2 K
2024-12-05T00:28:03,753 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for 66ee8e24fad26a57122b2ec4f81ae2cf in 22ms, sequenceid=137, compaction requested=false
2024-12-05T00:28:03,753 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 66ee8e24fad26a57122b2ec4f81ae2cf:
2024-12-05T00:28:03,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45497 {}] regionserver.HRegion(8855): Flush requested on 66ee8e24fad26a57122b2ec4f81ae2cf
2024-12-05T00:28:03,754 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 66ee8e24fad26a57122b2ec4f81ae2cf 1/1 column families, dataSize=14.71 KB heapSize=16 KB
2024-12-05T00:28:03,758 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/93190ac88bd946d7b33151d54f7fad16 is 1080, key is row0104/info:/1733358483731/Put/seqid=0
2024-12-05T00:28:03,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741857_1033 (size=20078)
2024-12-05T00:28:03,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741857_1033 (size=20078)
2024-12-05T00:28:03,763 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/93190ac88bd946d7b33151d54f7fad16
2024-12-05T00:28:03,769 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/93190ac88bd946d7b33151d54f7fad16 as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/93190ac88bd946d7b33151d54f7fad16
2024-12-05T00:28:03,774 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/93190ac88bd946d7b33151d54f7fad16, entries=14, sequenceid=154, filesize=19.6 K
2024-12-05T00:28:03,775 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=11.56 KB/11836 for 66ee8e24fad26a57122b2ec4f81ae2cf in 21ms, sequenceid=154, compaction requested=true
2024-12-05T00:28:03,775 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 66ee8e24fad26a57122b2ec4f81ae2cf:
2024-12-05T00:28:03,775 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66ee8e24fad26a57122b2ec4f81ae2cf:info, priority=-2147483648, current under compaction store size is 1
2024-12-05T00:28:03,775 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-05T00:28:03,775 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-05T00:28:03,776 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 75675 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-05T00:28:03,776 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HStore(1541): 66ee8e24fad26a57122b2ec4f81ae2cf/info is initiating minor compaction (all files)
2024-12-05T00:28:03,776 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 66ee8e24fad26a57122b2ec4f81ae2cf/info in TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.
2024-12-05T00:28:03,776 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/c7598cd2c2174aaa942fb5b095e617a6, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/9dbe99aa82054f8ca676808bb8bfd3cc, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/93190ac88bd946d7b33151d54f7fad16] into tmpdir=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp, totalSize=73.9 K
2024-12-05T00:28:03,776 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.Compactor(225): Compacting c7598cd2c2174aaa942fb5b095e617a6, keycount=35, bloomtype=ROW, size=42.1 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1733358469574
2024-12-05T00:28:03,777 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9dbe99aa82054f8ca676808bb8bfd3cc, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733358483719
2024-12-05T00:28:03,777 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.Compactor(225): Compacting 93190ac88bd946d7b33151d54f7fad16, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1733358483731
2024-12-05T00:28:03,787 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66ee8e24fad26a57122b2ec4f81ae2cf#info#compaction#71 average throughput is 57.46 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-05T00:28:03,787 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/4920b40441c14fa39e4429db850ddc94 is 1080, key is row0062/info:/1733358469574/Put/seqid=0
2024-12-05T00:28:03,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741858_1034 (size=65889)
2024-12-05T00:28:03,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741858_1034 (size=65889)
2024-12-05T00:28:03,796 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/4920b40441c14fa39e4429db850ddc94 as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/4920b40441c14fa39e4429db850ddc94
2024-12-05T00:28:03,802 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 66ee8e24fad26a57122b2ec4f81ae2cf/info of 66ee8e24fad26a57122b2ec4f81ae2cf into 4920b40441c14fa39e4429db850ddc94(size=64.3 K), total size for store is 64.3 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-05T00:28:03,802 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 66ee8e24fad26a57122b2ec4f81ae2cf:
2024-12-05T00:28:03,802 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf., storeName=66ee8e24fad26a57122b2ec4f81ae2cf/info, priority=13, startTime=1733358483775; duration=0sec
2024-12-05T00:28:03,802 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-05T00:28:03,802 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66ee8e24fad26a57122b2ec4f81ae2cf:info
2024-12-05T00:28:04,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:04,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:05,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:05,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-12-05T00:28:05,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45497 {}] regionserver.HRegion(8855): Flush requested on 66ee8e24fad26a57122b2ec4f81ae2cf
2024-12-05T00:28:05,776 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 66ee8e24fad26a57122b2ec4f81ae2cf 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB
2024-12-05T00:28:05,780 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/cc0cf24ed217452ab27948dd51379bdf is 1080, key is row0118/info:/1733358483755/Put/seqid=0
2024-12-05T00:28:05,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741859_1035 (size=17906)
2024-12-05T00:28:05,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741859_1035 (size=17906)
2024-12-05T00:28:05,787 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/cc0cf24ed217452ab27948dd51379bdf
2024-12-05T00:28:05,792 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/cc0cf24ed217452ab27948dd51379bdf as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/cc0cf24ed217452ab27948dd51379bdf
2024-12-05T00:28:05,797 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/cc0cf24ed217452ab27948dd51379bdf, entries=12, sequenceid=170, filesize=17.5 K
2024-12-05T00:28:05,798 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=13.66 KB/13988 for 66ee8e24fad26a57122b2ec4f81ae2cf in 22ms, sequenceid=170, compaction requested=false
2024-12-05T00:28:05,798 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 66ee8e24fad26a57122b2ec4f81ae2cf:
2024-12-05T00:28:05,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45497 {}] regionserver.HRegion(8855): Flush requested on 66ee8e24fad26a57122b2ec4f81ae2cf
2024-12-05T00:28:05,799 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 66ee8e24fad26a57122b2ec4f81ae2cf 1/1 column families, dataSize=14.71 KB heapSize=16 KB
2024-12-05T00:28:05,803 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/e72fe893029a46bebbb7ac805e486548 is 1080, key is row0130/info:/1733358485777/Put/seqid=0
2024-12-05T00:28:05,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741860_1036 (size=20078)
2024-12-05T00:28:05,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741860_1036 (size=20078)
2024-12-05T00:28:05,809 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=187 (bloomFilter=true), to=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/e72fe893029a46bebbb7ac805e486548
2024-12-05T00:28:05,815 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/e72fe893029a46bebbb7ac805e486548 as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/e72fe893029a46bebbb7ac805e486548
2024-12-05T00:28:05,820 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/e72fe893029a46bebbb7ac805e486548, entries=14, sequenceid=187, filesize=19.6 K
2024-12-05T00:28:05,821 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=13.66 KB/13988 for 66ee8e24fad26a57122b2ec4f81ae2cf in 22ms, sequenceid=187, compaction requested=true
2024-12-05T00:28:05,821 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 66ee8e24fad26a57122b2ec4f81ae2cf:
2024-12-05T00:28:05,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66ee8e24fad26a57122b2ec4f81ae2cf:info, priority=-2147483648, current under compaction store size is 1
2024-12-05T00:28:05,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-05T00:28:05,821 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-05T00:28:05,822 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103873 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-05T00:28:05,822 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HStore(1541): 66ee8e24fad26a57122b2ec4f81ae2cf/info is initiating minor compaction (all files)
2024-12-05T00:28:05,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45497 {}] regionserver.HRegion(8855): Flush requested on 66ee8e24fad26a57122b2ec4f81ae2cf
2024-12-05T00:28:05,822 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 66ee8e24fad26a57122b2ec4f81ae2cf/info in TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.
2024-12-05T00:28:05,822 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/4920b40441c14fa39e4429db850ddc94, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/cc0cf24ed217452ab27948dd51379bdf, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/e72fe893029a46bebbb7ac805e486548] into tmpdir=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp, totalSize=101.4 K
2024-12-05T00:28:05,822 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 66ee8e24fad26a57122b2ec4f81ae2cf 1/1 column families, dataSize=14.71 KB heapSize=16 KB
2024-12-05T00:28:05,823 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4920b40441c14fa39e4429db850ddc94, keycount=56, bloomtype=ROW, size=64.3 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1733358469574
2024-12-05T00:28:05,823 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.Compactor(225): Compacting cc0cf24ed217452ab27948dd51379bdf, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733358483755
2024-12-05T00:28:05,824 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.Compactor(225): Compacting e72fe893029a46bebbb7ac805e486548, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1733358485777
2024-12-05T00:28:05,827 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/aceff16b1e1340b680d3b3d8fc5edd08 is 1080, key is row0144/info:/1733358485799/Put/seqid=0
2024-12-05T00:28:05,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741861_1037 (size=20078)
2024-12-05T00:28:05,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741861_1037 (size=20078)
2024-12-05T00:28:05,834 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/aceff16b1e1340b680d3b3d8fc5edd08
2024-12-05T00:28:05,837 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66ee8e24fad26a57122b2ec4f81ae2cf#info#compaction#75 average throughput is 42.07 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-05T00:28:05,837 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/4a376549b1b84d94921db1eb92a9a4f4 is 1080, key is row0062/info:/1733358469574/Put/seqid=0
2024-12-05T00:28:05,839 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/aceff16b1e1340b680d3b3d8fc5edd08 as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/aceff16b1e1340b680d3b3d8fc5edd08
2024-12-05T00:28:05,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741862_1038 (size=94096)
2024-12-05T00:28:05,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741862_1038 (size=94096)
2024-12-05T00:28:05,845 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/aceff16b1e1340b680d3b3d8fc5edd08, entries=14, sequenceid=204, filesize=19.6 K
2024-12-05T00:28:05,846 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=3.15 KB/3228 for 66ee8e24fad26a57122b2ec4f81ae2cf in 24ms, sequenceid=204, compaction requested=false
2024-12-05T00:28:05,846 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 66ee8e24fad26a57122b2ec4f81ae2cf:
2024-12-05T00:28:05,847 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing
hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/4a376549b1b84d94921db1eb92a9a4f4 as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/4a376549b1b84d94921db1eb92a9a4f4 2024-12-05T00:28:05,853 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 66ee8e24fad26a57122b2ec4f81ae2cf/info of 66ee8e24fad26a57122b2ec4f81ae2cf into 4a376549b1b84d94921db1eb92a9a4f4(size=91.9 K), total size for store is 111.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-05T00:28:05,853 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 66ee8e24fad26a57122b2ec4f81ae2cf: 2024-12-05T00:28:05,853 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf., storeName=66ee8e24fad26a57122b2ec4f81ae2cf/info, priority=13, startTime=1733358485821; duration=0sec 2024-12-05T00:28:05,853 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-05T00:28:05,853 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66ee8e24fad26a57122b2ec4f81ae2cf:info 2024-12-05T00:28:06,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:06,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:06,476 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-05T00:28:07,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:07,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:07,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45497 {}] regionserver.HRegion(8855): Flush requested on 66ee8e24fad26a57122b2ec4f81ae2cf 2024-12-05T00:28:07,835 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 66ee8e24fad26a57122b2ec4f81ae2cf 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-05T00:28:07,839 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/146b77e3cd634e01acf2f147e0899d48 is 1080, key is row0158/info:/1733358485823/Put/seqid=0 2024-12-05T00:28:07,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741863_1039 (size=12516) 2024-12-05T00:28:07,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741863_1039 (size=12516) 2024-12-05T00:28:07,846 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/146b77e3cd634e01acf2f147e0899d48 2024-12-05T00:28:07,851 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/146b77e3cd634e01acf2f147e0899d48 as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/146b77e3cd634e01acf2f147e0899d48 2024-12-05T00:28:07,856 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/146b77e3cd634e01acf2f147e0899d48, entries=7, sequenceid=215, filesize=12.2 K 2024-12-05T00:28:07,857 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 66ee8e24fad26a57122b2ec4f81ae2cf in 22ms, sequenceid=215, compaction requested=true 2024-12-05T00:28:07,857 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 66ee8e24fad26a57122b2ec4f81ae2cf: 2024-12-05T00:28:07,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66ee8e24fad26a57122b2ec4f81ae2cf:info, priority=-2147483648, current under compaction store size is 1 2024-12-05T00:28:07,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-05T00:28:07,857 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-05T00:28:07,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45497 {}] regionserver.HRegion(8855): Flush requested on 66ee8e24fad26a57122b2ec4f81ae2cf 2024-12-05T00:28:07,858 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 66ee8e24fad26a57122b2ec4f81ae2cf 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-12-05T00:28:07,858 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 126690 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-05T00:28:07,858 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HStore(1541): 66ee8e24fad26a57122b2ec4f81ae2cf/info is initiating minor compaction (all files) 2024-12-05T00:28:07,858 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 66ee8e24fad26a57122b2ec4f81ae2cf/info in TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf. 2024-12-05T00:28:07,859 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/4a376549b1b84d94921db1eb92a9a4f4, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/aceff16b1e1340b680d3b3d8fc5edd08, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/146b77e3cd634e01acf2f147e0899d48] into tmpdir=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp, totalSize=123.7 K 2024-12-05T00:28:07,859 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4a376549b1b84d94921db1eb92a9a4f4, keycount=82, bloomtype=ROW, size=91.9 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1733358469574 2024-12-05T00:28:07,860 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.Compactor(225): Compacting aceff16b1e1340b680d3b3d8fc5edd08, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1733358485799 2024-12-05T00:28:07,860 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.Compactor(225): Compacting 146b77e3cd634e01acf2f147e0899d48, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733358485823 2024-12-05T00:28:07,862 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/91cdecbaf61f490893644101621dfa68 is 1080, key is row0165/info:/1733358487836/Put/seqid=0 2024-12-05T00:28:07,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to 
blk_1073741864_1040 (size=20078) 2024-12-05T00:28:07,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741864_1040 (size=20078) 2024-12-05T00:28:07,868 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/91cdecbaf61f490893644101621dfa68 2024-12-05T00:28:07,872 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66ee8e24fad26a57122b2ec4f81ae2cf#info#compaction#78 average throughput is 52.85 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-05T00:28:07,873 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/a3d71e4547fe431980e417c9e4281ed2 is 1080, key is row0062/info:/1733358469574/Put/seqid=0 2024-12-05T00:28:07,874 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/91cdecbaf61f490893644101621dfa68 as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/91cdecbaf61f490893644101621dfa68 2024-12-05T00:28:07,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741865_1041 (size=116840) 2024-12-05T00:28:07,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741865_1041 (size=116840) 2024-12-05T00:28:07,879 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/91cdecbaf61f490893644101621dfa68, entries=14, sequenceid=232, filesize=19.6 K 2024-12-05T00:28:07,880 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=12.61 KB/12912 for 66ee8e24fad26a57122b2ec4f81ae2cf in 22ms, sequenceid=232, compaction requested=false 2024-12-05T00:28:07,880 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 66ee8e24fad26a57122b2ec4f81ae2cf: 2024-12-05T00:28:07,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45497 {}] regionserver.HRegion(8855): Flush requested on 66ee8e24fad26a57122b2ec4f81ae2cf 2024-12-05T00:28:07,882 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 66ee8e24fad26a57122b2ec4f81ae2cf 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-12-05T00:28:07,885 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/a3d71e4547fe431980e417c9e4281ed2 as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/a3d71e4547fe431980e417c9e4281ed2 2024-12-05T00:28:07,886 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/560822d7f61c4fcfa9026fe971e2b908 is 1080, key is row0179/info:/1733358487859/Put/seqid=0 2024-12-05T00:28:07,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741866_1042 (size=20078) 2024-12-05T00:28:07,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741866_1042 (size=20078) 2024-12-05T00:28:07,891 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/560822d7f61c4fcfa9026fe971e2b908 2024-12-05T00:28:07,891 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 66ee8e24fad26a57122b2ec4f81ae2cf/info of 66ee8e24fad26a57122b2ec4f81ae2cf into a3d71e4547fe431980e417c9e4281ed2(size=114.1 K), total size for store is 133.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-05T00:28:07,891 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 66ee8e24fad26a57122b2ec4f81ae2cf: 2024-12-05T00:28:07,891 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf., storeName=66ee8e24fad26a57122b2ec4f81ae2cf/info, priority=13, startTime=1733358487857; duration=0sec 2024-12-05T00:28:07,891 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-05T00:28:07,891 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66ee8e24fad26a57122b2ec4f81ae2cf:info 2024-12-05T00:28:07,895 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/560822d7f61c4fcfa9026fe971e2b908 as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/560822d7f61c4fcfa9026fe971e2b908 2024-12-05T00:28:07,898 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/560822d7f61c4fcfa9026fe971e2b908, entries=14, sequenceid=249, filesize=19.6 K 2024-12-05T00:28:07,899 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=0 B/0 for 66ee8e24fad26a57122b2ec4f81ae2cf in 17ms, sequenceid=249, compaction requested=true 2024-12-05T00:28:07,899 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 66ee8e24fad26a57122b2ec4f81ae2cf: 2024-12-05T00:28:07,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66ee8e24fad26a57122b2ec4f81ae2cf:info, priority=-2147483648, current under compaction store size is 1 2024-12-05T00:28:07,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-05T00:28:07,899 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-05T00:28:07,900 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 156996 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-05T00:28:07,900 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HStore(1541): 66ee8e24fad26a57122b2ec4f81ae2cf/info is initiating minor compaction (all files) 2024-12-05T00:28:07,900 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 66ee8e24fad26a57122b2ec4f81ae2cf/info in TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf. 
2024-12-05T00:28:07,900 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/a3d71e4547fe431980e417c9e4281ed2, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/91cdecbaf61f490893644101621dfa68, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/560822d7f61c4fcfa9026fe971e2b908] into tmpdir=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp, totalSize=153.3 K 2024-12-05T00:28:07,901 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.Compactor(225): Compacting a3d71e4547fe431980e417c9e4281ed2, keycount=103, bloomtype=ROW, size=114.1 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733358469574 2024-12-05T00:28:07,901 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.Compactor(225): Compacting 91cdecbaf61f490893644101621dfa68, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733358487836 2024-12-05T00:28:07,901 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.Compactor(225): Compacting 560822d7f61c4fcfa9026fe971e2b908, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1733358487859 2024-12-05T00:28:07,911 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66ee8e24fad26a57122b2ec4f81ae2cf#info#compaction#80 average throughput is 67.21 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-05T00:28:07,912 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/a7f45b45ba754ab79636274d88d811d7 is 1080, key is row0062/info:/1733358469574/Put/seqid=0 2024-12-05T00:28:07,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741867_1043 (size=147331) 2024-12-05T00:28:07,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741867_1043 (size=147331) 2024-12-05T00:28:07,920 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/a7f45b45ba754ab79636274d88d811d7 as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/a7f45b45ba754ab79636274d88d811d7 2024-12-05T00:28:07,925 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 66ee8e24fad26a57122b2ec4f81ae2cf/info of 66ee8e24fad26a57122b2ec4f81ae2cf into a7f45b45ba754ab79636274d88d811d7(size=143.9 K), total size for store is 143.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-05T00:28:07,925 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 66ee8e24fad26a57122b2ec4f81ae2cf: 2024-12-05T00:28:07,925 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf., storeName=66ee8e24fad26a57122b2ec4f81ae2cf/info, priority=13, startTime=1733358487899; duration=0sec 2024-12-05T00:28:07,925 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-05T00:28:07,925 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66ee8e24fad26a57122b2ec4f81ae2cf:info 2024-12-05T00:28:08,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:08,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:09,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:09,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:09,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45497 {}] regionserver.HRegion(8855): Flush requested on 66ee8e24fad26a57122b2ec4f81ae2cf 2024-12-05T00:28:09,892 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 66ee8e24fad26a57122b2ec4f81ae2cf 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-05T00:28:09,896 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/67eba21305ff4cb6bbf0dd0137027d89 is 1080, key is row0193/info:/1733358489883/Put/seqid=0 2024-12-05T00:28:09,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741868_1044 (size=12521) 2024-12-05T00:28:09,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741868_1044 (size=12521) 2024-12-05T00:28:09,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45497 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=66ee8e24fad26a57122b2ec4f81ae2cf, server=2113c16e5528,45497,1733358456539 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-05T00:28:09,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45497 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:58192 deadline: 1733358499923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=66ee8e24fad26a57122b2ec4f81ae2cf, server=2113c16e5528,45497,1733358456539 2024-12-05T00:28:09,924 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf., hostname=2113c16e5528,45497,1733358456539, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf., hostname=2113c16e5528,45497,1733358456539, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=66ee8e24fad26a57122b2ec4f81ae2cf, server=2113c16e5528,45497,1733358456539 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T00:28:09,924 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf., hostname=2113c16e5528,45497,1733358456539, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=66ee8e24fad26a57122b2ec4f81ae2cf, server=2113c16e5528,45497,1733358456539 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T00:28:09,925 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf., hostname=2113c16e5528,45497,1733358456539, seqNum=127 because the exception is null or not the one we care about 2024-12-05T00:28:10,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-05T00:28:10,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-05T00:28:10,305 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/67eba21305ff4cb6bbf0dd0137027d89 2024-12-05T00:28:10,310 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/67eba21305ff4cb6bbf0dd0137027d89 as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/67eba21305ff4cb6bbf0dd0137027d89 2024-12-05T00:28:10,314 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/67eba21305ff4cb6bbf0dd0137027d89, entries=7, sequenceid=261, filesize=12.2 K 2024-12-05T00:28:10,316 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 66ee8e24fad26a57122b2ec4f81ae2cf in 423ms, sequenceid=261, compaction requested=false 2024-12-05T00:28:10,316 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 66ee8e24fad26a57122b2ec4f81ae2cf: 2024-12-05T00:28:11,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:11,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:12,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:12,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:13,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:13,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:14,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:14,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:15,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:15,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:16,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:16,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:17,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:17,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:17,776 INFO [master/2113c16e5528:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-05T00:28:17,776 INFO [master/2113c16e5528:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-05T00:28:18,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:18,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:19,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:19,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:20,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45497 {}] regionserver.HRegion(8855): Flush requested on 66ee8e24fad26a57122b2ec4f81ae2cf 2024-12-05T00:28:20,033 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 66ee8e24fad26a57122b2ec4f81ae2cf 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-05T00:28:20,037 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/b41d4aebf32646f698fb54269d7b41cc is 1080, key is row0200/info:/1733358489893/Put/seqid=0 2024-12-05T00:28:20,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741869_1045 (size=29807) 2024-12-05T00:28:20,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741869_1045 (size=29807) 2024-12-05T00:28:20,042 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/b41d4aebf32646f698fb54269d7b41cc 2024-12-05T00:28:20,047 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/b41d4aebf32646f698fb54269d7b41cc as 
hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/b41d4aebf32646f698fb54269d7b41cc 2024-12-05T00:28:20,052 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/b41d4aebf32646f698fb54269d7b41cc, entries=23, sequenceid=287, filesize=29.1 K 2024-12-05T00:28:20,053 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 66ee8e24fad26a57122b2ec4f81ae2cf in 19ms, sequenceid=287, compaction requested=true 2024-12-05T00:28:20,053 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 66ee8e24fad26a57122b2ec4f81ae2cf: 2024-12-05T00:28:20,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66ee8e24fad26a57122b2ec4f81ae2cf:info, priority=-2147483648, current under compaction store size is 1 2024-12-05T00:28:20,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-05T00:28:20,053 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-05T00:28:20,054 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 189659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-05T00:28:20,054 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HStore(1541): 66ee8e24fad26a57122b2ec4f81ae2cf/info is initiating minor compaction (all files) 2024-12-05T00:28:20,054 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 66ee8e24fad26a57122b2ec4f81ae2cf/info in TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf. 
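The flush entries just above show the MemStoreFlusher writing the new store file under the region's .tmp directory and HRegionFileSystem then "Committing" it into the info/ family directory. A reasonable reading of that two-step sequence is the common write-to-temp-then-rename pattern on HDFS. The sketch below illustrates only that general pattern; it is not HBase's HRegionFileSystem code, and the class name TmpThenCommitSketch and the /tmp/region-demo paths are invented for the example.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative sketch of write-to-.tmp-then-rename; not HBase code.
    public class TmpThenCommitSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.getLocal(conf);   // local FS stand-in for HDFS

            Path tmp = new Path("/tmp/region-demo/.tmp/info/flush-0001");   // hypothetical paths
            Path committed = new Path("/tmp/region-demo/info/flush-0001");

            // 1. Write the flushed data to the temporary location first.
            try (FSDataOutputStream out = fs.create(tmp, true)) {
                out.writeBytes("flushed cells would go here");
            }

            // 2. "Commit" by renaming the finished file into the store directory.
            fs.mkdirs(committed.getParent());
            boolean ok = fs.rename(tmp, committed);
            System.out.println("commit succeeded: " + ok);
        }
    }

Because an HDFS rename within one namespace is an atomic metadata operation, committing by rename never exposes a partially written store file to readers, which fits the clean "Committing ... as ..." followed by "Added ..." sequence in the log.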
2024-12-05T00:28:20,054 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/a7f45b45ba754ab79636274d88d811d7, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/67eba21305ff4cb6bbf0dd0137027d89, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/b41d4aebf32646f698fb54269d7b41cc] into tmpdir=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp, totalSize=185.2 K 2024-12-05T00:28:20,054 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.Compactor(225): Compacting a7f45b45ba754ab79636274d88d811d7, keycount=131, bloomtype=ROW, size=143.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1733358469574 2024-12-05T00:28:20,055 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.Compactor(225): Compacting 67eba21305ff4cb6bbf0dd0137027d89, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1733358489883 2024-12-05T00:28:20,055 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.Compactor(225): Compacting b41d4aebf32646f698fb54269d7b41cc, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1733358489893 2024-12-05T00:28:20,066 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66ee8e24fad26a57122b2ec4f81ae2cf#info#compaction#83 average throughput is 55.07 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-05T00:28:20,067 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/895649f1633a4ffeb36e95b2b61068cc is 1080, key is row0062/info:/1733358469574/Put/seqid=0 2024-12-05T00:28:20,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741870_1046 (size=179809) 2024-12-05T00:28:20,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741870_1046 (size=179809) 2024-12-05T00:28:20,075 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/895649f1633a4ffeb36e95b2b61068cc as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/895649f1633a4ffeb36e95b2b61068cc 2024-12-05T00:28:20,080 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 66ee8e24fad26a57122b2ec4f81ae2cf/info of 66ee8e24fad26a57122b2ec4f81ae2cf into 895649f1633a4ffeb36e95b2b61068cc(size=175.6 K), total size for store is 175.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-05T00:28:20,080 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 66ee8e24fad26a57122b2ec4f81ae2cf: 2024-12-05T00:28:20,080 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf., storeName=66ee8e24fad26a57122b2ec4f81ae2cf/info, priority=13, startTime=1733358500053; duration=0sec 2024-12-05T00:28:20,080 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-05T00:28:20,080 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66ee8e24fad26a57122b2ec4f81ae2cf:info 2024-12-05T00:28:20,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:20,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:21,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:21,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:22,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45497 {}] regionserver.HRegion(8855): Flush requested on 66ee8e24fad26a57122b2ec4f81ae2cf 2024-12-05T00:28:22,046 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 66ee8e24fad26a57122b2ec4f81ae2cf 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-05T00:28:22,050 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/0de39528406c4962b34f786fbdc30611 is 1080, key is row0223/info:/1733358500034/Put/seqid=0 2024-12-05T00:28:22,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741871_1047 (size=12523) 2024-12-05T00:28:22,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741871_1047 (size=12523) 2024-12-05T00:28:22,059 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/0de39528406c4962b34f786fbdc30611 2024-12-05T00:28:22,064 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/0de39528406c4962b34f786fbdc30611 as 
hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/0de39528406c4962b34f786fbdc30611 2024-12-05T00:28:22,068 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/0de39528406c4962b34f786fbdc30611, entries=7, sequenceid=298, filesize=12.2 K 2024-12-05T00:28:22,069 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=17.86 KB/18292 for 66ee8e24fad26a57122b2ec4f81ae2cf in 24ms, sequenceid=298, compaction requested=false 2024-12-05T00:28:22,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 66ee8e24fad26a57122b2ec4f81ae2cf: 2024-12-05T00:28:22,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45497 {}] regionserver.HRegion(8855): Flush requested on 66ee8e24fad26a57122b2ec4f81ae2cf 2024-12-05T00:28:22,071 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 66ee8e24fad26a57122b2ec4f81ae2cf 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-12-05T00:28:22,074 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/52d5e760882148f9a766657c8b7991a3 is 1080, key is row0230/info:/1733358502046/Put/seqid=0 2024-12-05T00:28:22,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741872_1048 (size=24412) 2024-12-05T00:28:22,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741872_1048 (size=24412) 2024-12-05T00:28:22,079 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=319 (bloomFilter=true), to=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/52d5e760882148f9a766657c8b7991a3 2024-12-05T00:28:22,084 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/52d5e760882148f9a766657c8b7991a3 as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/52d5e760882148f9a766657c8b7991a3 2024-12-05T00:28:22,088 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/52d5e760882148f9a766657c8b7991a3, entries=18, sequenceid=319, filesize=23.8 K 2024-12-05T00:28:22,089 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=9.46 KB/9684 for 66ee8e24fad26a57122b2ec4f81ae2cf in 18ms, sequenceid=319, compaction requested=true 2024-12-05T00:28:22,089 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 66ee8e24fad26a57122b2ec4f81ae2cf: 2024-12-05T00:28:22,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66ee8e24fad26a57122b2ec4f81ae2cf:info, priority=-2147483648, current under compaction store size is 1 2024-12-05T00:28:22,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-05T00:28:22,089 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-05T00:28:22,090 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 216744 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-05T00:28:22,090 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HStore(1541): 66ee8e24fad26a57122b2ec4f81ae2cf/info is initiating minor compaction (all files) 2024-12-05T00:28:22,090 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 66ee8e24fad26a57122b2ec4f81ae2cf/info in TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf. 2024-12-05T00:28:22,091 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/895649f1633a4ffeb36e95b2b61068cc, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/0de39528406c4962b34f786fbdc30611, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/52d5e760882148f9a766657c8b7991a3] into tmpdir=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp, totalSize=211.7 K 2024-12-05T00:28:22,091 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.Compactor(225): Compacting 895649f1633a4ffeb36e95b2b61068cc, keycount=161, bloomtype=ROW, size=175.6 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1733358469574 2024-12-05T00:28:22,091 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0de39528406c4962b34f786fbdc30611, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1733358500034 2024-12-05T00:28:22,092 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] compactions.Compactor(225): Compacting 52d5e760882148f9a766657c8b7991a3, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1733358502046 2024-12-05T00:28:22,104 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66ee8e24fad26a57122b2ec4f81ae2cf#info#compaction#86 average throughput is 47.72 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-05T00:28:22,104 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/4e1816cf9e0c4adea505479f74a89665 is 1080, key is row0062/info:/1733358469574/Put/seqid=0 2024-12-05T00:28:22,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741873_1049 (size=206963) 2024-12-05T00:28:22,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741873_1049 (size=206963) 2024-12-05T00:28:22,111 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/4e1816cf9e0c4adea505479f74a89665 as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/4e1816cf9e0c4adea505479f74a89665 2024-12-05T00:28:22,117 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 66ee8e24fad26a57122b2ec4f81ae2cf/info of 66ee8e24fad26a57122b2ec4f81ae2cf into 4e1816cf9e0c4adea505479f74a89665(size=202.1 K), total size for store is 202.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-05T00:28:22,118 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 66ee8e24fad26a57122b2ec4f81ae2cf: 2024-12-05T00:28:22,118 INFO [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf., storeName=66ee8e24fad26a57122b2ec4f81ae2cf/info, priority=13, startTime=1733358502089; duration=0sec 2024-12-05T00:28:22,118 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-05T00:28:22,118 DEBUG [RS:0;2113c16e5528:45497-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66ee8e24fad26a57122b2ec4f81ae2cf:info 2024-12-05T00:28:22,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:22,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:22,297 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-12-05T00:28:23,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:23,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:24,085 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-12-05T00:28:24,085 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C45497%2C1733358456539.1733358504085 2024-12-05T00:28:24,091 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:24,091 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:24,091 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:24,091 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:24,092 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:24,092 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/WALs/2113c16e5528,45497,1733358456539/2113c16e5528%2C45497%2C1733358456539.1733358456916 with entries=310, filesize=307.89 KB; new WAL /user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/WALs/2113c16e5528,45497,1733358456539/2113c16e5528%2C45497%2C1733358456539.1733358504085 2024-12-05T00:28:24,093 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41389:41389),(127.0.0.1/127.0.0.1:35401:35401)] 2024-12-05T00:28:24,093 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/WALs/2113c16e5528,45497,1733358456539/2113c16e5528%2C45497%2C1733358456539.1733358456916 is not closed yet, will try archiving it next time 2024-12-05T00:28:24,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741833_1009 (size=315283) 2024-12-05T00:28:24,095 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741833_1009 (size=315283) 2024-12-05T00:28:24,096 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-12-05T00:28:24,100 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/hbase/meta/1588230740/.tmp/info/541257ac8f96481aa7d600830b269071 is 193, key is TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf./info:regioninfo/1733358472366/Put/seqid=0 2024-12-05T00:28:24,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741875_1051 (size=6223) 2024-12-05T00:28:24,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741875_1051 (size=6223) 2024-12-05T00:28:24,104 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/hbase/meta/1588230740/.tmp/info/541257ac8f96481aa7d600830b269071 2024-12-05T00:28:24,110 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/hbase/meta/1588230740/.tmp/info/541257ac8f96481aa7d600830b269071 as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/hbase/meta/1588230740/info/541257ac8f96481aa7d600830b269071 2024-12-05T00:28:24,114 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/hbase/meta/1588230740/info/541257ac8f96481aa7d600830b269071, entries=5, sequenceid=21, filesize=6.1 K 2024-12-05T00:28:24,115 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 19ms, sequenceid=21, compaction requested=false 2024-12-05T00:28:24,115 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-05T00:28:24,115 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 66ee8e24fad26a57122b2ec4f81ae2cf 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-12-05T00:28:24,119 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/5513dfb2f5864ef88e27d3c52db766b1 is 1080, key is row0248/info:/1733358502071/Put/seqid=0 2024-12-05T00:28:24,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741876_1052 (size=14681) 2024-12-05T00:28:24,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741876_1052 (size=14681) 2024-12-05T00:28:24,124 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=332 (bloomFilter=true), 
to=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/5513dfb2f5864ef88e27d3c52db766b1 2024-12-05T00:28:24,129 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/.tmp/info/5513dfb2f5864ef88e27d3c52db766b1 as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/5513dfb2f5864ef88e27d3c52db766b1 2024-12-05T00:28:24,133 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/5513dfb2f5864ef88e27d3c52db766b1, entries=9, sequenceid=332, filesize=14.3 K 2024-12-05T00:28:24,135 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 66ee8e24fad26a57122b2ec4f81ae2cf in 20ms, sequenceid=332, compaction requested=false 2024-12-05T00:28:24,135 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 66ee8e24fad26a57122b2ec4f81ae2cf: 2024-12-05T00:28:24,135 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for f5a5551bdbe933a1af0f5b8ffbd7feee: 2024-12-05T00:28:24,135 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C45497%2C1733358456539.1733358504135 2024-12-05T00:28:24,139 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:24,140 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:24,140 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:24,140 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:24,140 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:24,140 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/WALs/2113c16e5528,45497,1733358456539/2113c16e5528%2C45497%2C1733358456539.1733358504085 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/WALs/2113c16e5528,45497,1733358456539/2113c16e5528%2C45497%2C1733358456539.1733358504135 2024-12-05T00:28:24,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741874_1050 (size=731) 2024-12-05T00:28:24,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741874_1050 (size=731) 2024-12-05T00:28:24,142 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/WALs/2113c16e5528,45497,1733358456539/2113c16e5528%2C45497%2C1733358456539.1733358456916 is not closed yet, will try archiving it next time 2024-12-05T00:28:24,145 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/WALs/2113c16e5528,45497,1733358456539/2113c16e5528%2C45497%2C1733358456539.1733358504085 to 
hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/oldWALs/2113c16e5528%2C45497%2C1733358456539.1733358504085 2024-12-05T00:28:24,145 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41389:41389),(127.0.0.1/127.0.0.1:35401:35401)] 2024-12-05T00:28:24,145 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/WALs/2113c16e5528,45497,1733358456539/2113c16e5528%2C45497%2C1733358456539.1733358456916 is not closed yet, will try archiving it next time 2024-12-05T00:28:24,146 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T00:28:24,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:24,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:24,495 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/WALs/2113c16e5528,45497,1733358456539/2113c16e5528%2C45497%2C1733358456539.1733358456916 to hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/oldWALs/2113c16e5528%2C45497%2C1733358456539.1733358456916 2024-12-05T00:28:24,546 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-05T00:28:24,546 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-05T00:28:24,546 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:28:24,547 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:28:24,547 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:28:24,547 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-05T00:28:24,547 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-05T00:28:24,547 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=690556816, stopped=false 2024-12-05T00:28:24,547 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=2113c16e5528,38761,1733358456491 2024-12-05T00:28:24,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45497-0x1018004f92e0001, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T00:28:24,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T00:28:24,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45497-0x1018004f92e0001, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:28:24,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:28:24,549 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T00:28:24,549 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45497-0x1018004f92e0001, quorum=127.0.0.1:58134, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:28:24,549 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-05T00:28:24,549 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:28:24,549 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:28:24,549 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:28:24,550 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2113c16e5528,45497,1733358456539' ***** 2024-12-05T00:28:24,550 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T00:28:24,550 INFO [RS:0;2113c16e5528:45497 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T00:28:24,550 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T00:28:24,550 INFO [RS:0;2113c16e5528:45497 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T00:28:24,550 INFO [RS:0;2113c16e5528:45497 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T00:28:24,551 INFO [RS:0;2113c16e5528:45497 {}] regionserver.HRegionServer(3091): Received CLOSE for 66ee8e24fad26a57122b2ec4f81ae2cf 2024-12-05T00:28:24,551 INFO [RS:0;2113c16e5528:45497 {}] regionserver.HRegionServer(3091): Received CLOSE for f5a5551bdbe933a1af0f5b8ffbd7feee 2024-12-05T00:28:24,551 INFO [RS:0;2113c16e5528:45497 {}] regionserver.HRegionServer(959): stopping server 2113c16e5528,45497,1733358456539 2024-12-05T00:28:24,551 INFO [RS:0;2113c16e5528:45497 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T00:28:24,551 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 66ee8e24fad26a57122b2ec4f81ae2cf, disabling compactions & flushes 2024-12-05T00:28:24,551 INFO [RS:0;2113c16e5528:45497 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;2113c16e5528:45497. 2024-12-05T00:28:24,551 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf. 2024-12-05T00:28:24,551 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf. 
2024-12-05T00:28:24,551 DEBUG [RS:0;2113c16e5528:45497 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:28:24,551 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf. after waiting 0 ms 2024-12-05T00:28:24,551 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf. 2024-12-05T00:28:24,551 DEBUG [RS:0;2113c16e5528:45497 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:28:24,551 INFO [RS:0;2113c16e5528:45497 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T00:28:24,551 INFO [RS:0;2113c16e5528:45497 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T00:28:24,551 INFO [RS:0;2113c16e5528:45497 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-05T00:28:24,551 INFO [RS:0;2113c16e5528:45497 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-05T00:28:24,551 INFO [RS:0;2113c16e5528:45497 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-12-05T00:28:24,551 DEBUG [RS:0;2113c16e5528:45497 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 66ee8e24fad26a57122b2ec4f81ae2cf=TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf., f5a5551bdbe933a1af0f5b8ffbd7feee=TestLogRolling-testLogRolling,,1733358471649.f5a5551bdbe933a1af0f5b8ffbd7feee.} 2024-12-05T00:28:24,551 DEBUG [RS:0;2113c16e5528:45497 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 66ee8e24fad26a57122b2ec4f81ae2cf, f5a5551bdbe933a1af0f5b8ffbd7feee 2024-12-05T00:28:24,551 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T00:28:24,552 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T00:28:24,552 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T00:28:24,552 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T00:28:24,552 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T00:28:24,551 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/f4813b19d3c1461babda1d90a4fc8f74.aac3f55253ff6dfc9aa5bff573ffec89->hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/f4813b19d3c1461babda1d90a4fc8f74-top, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/TestLogRolling-testLogRolling=aac3f55253ff6dfc9aa5bff573ffec89-36320405a6e544068ff789ce51073ebc, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/TestLogRolling-testLogRolling=aac3f55253ff6dfc9aa5bff573ffec89-e63154bef1e04d629c303c2678652967, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/c7598cd2c2174aaa942fb5b095e617a6, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/TestLogRolling-testLogRolling=aac3f55253ff6dfc9aa5bff573ffec89-b103bb3aa9654ef78cae7f2cfed4db0b, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/9dbe99aa82054f8ca676808bb8bfd3cc, 
hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/4920b40441c14fa39e4429db850ddc94, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/93190ac88bd946d7b33151d54f7fad16, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/cc0cf24ed217452ab27948dd51379bdf, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/4a376549b1b84d94921db1eb92a9a4f4, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/e72fe893029a46bebbb7ac805e486548, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/aceff16b1e1340b680d3b3d8fc5edd08, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/a3d71e4547fe431980e417c9e4281ed2, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/146b77e3cd634e01acf2f147e0899d48, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/91cdecbaf61f490893644101621dfa68, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/a7f45b45ba754ab79636274d88d811d7, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/560822d7f61c4fcfa9026fe971e2b908, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/67eba21305ff4cb6bbf0dd0137027d89, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/895649f1633a4ffeb36e95b2b61068cc, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/b41d4aebf32646f698fb54269d7b41cc, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/0de39528406c4962b34f786fbdc30611, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/52d5e760882148f9a766657c8b7991a3] to archive 2024-12-05T00:28:24,553 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-05T00:28:24,554 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/f4813b19d3c1461babda1d90a4fc8f74.aac3f55253ff6dfc9aa5bff573ffec89 to hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/archive/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/f4813b19d3c1461babda1d90a4fc8f74.aac3f55253ff6dfc9aa5bff573ffec89 2024-12-05T00:28:24,556 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/TestLogRolling-testLogRolling=aac3f55253ff6dfc9aa5bff573ffec89-36320405a6e544068ff789ce51073ebc to hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/archive/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/TestLogRolling-testLogRolling=aac3f55253ff6dfc9aa5bff573ffec89-36320405a6e544068ff789ce51073ebc 2024-12-05T00:28:24,556 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-12-05T00:28:24,557 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T00:28:24,557 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T00:28:24,557 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733358504551Running coprocessor pre-close hooks at 1733358504551Disabling compacts and flushes for region at 1733358504551Disabling writes for close at 1733358504552 (+1 ms)Writing region close event to WAL at 1733358504553 (+1 ms)Running coprocessor post-close hooks at 1733358504557 (+4 ms)Closed at 1733358504557 2024-12-05T00:28:24,557 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/TestLogRolling-testLogRolling=aac3f55253ff6dfc9aa5bff573ffec89-e63154bef1e04d629c303c2678652967 to hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/archive/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/TestLogRolling-testLogRolling=aac3f55253ff6dfc9aa5bff573ffec89-e63154bef1e04d629c303c2678652967 2024-12-05T00:28:24,557 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-05T00:28:24,558 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/c7598cd2c2174aaa942fb5b095e617a6 to hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/archive/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/c7598cd2c2174aaa942fb5b095e617a6 2024-12-05T00:28:24,559 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/TestLogRolling-testLogRolling=aac3f55253ff6dfc9aa5bff573ffec89-b103bb3aa9654ef78cae7f2cfed4db0b to hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/archive/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/TestLogRolling-testLogRolling=aac3f55253ff6dfc9aa5bff573ffec89-b103bb3aa9654ef78cae7f2cfed4db0b 2024-12-05T00:28:24,560 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/9dbe99aa82054f8ca676808bb8bfd3cc to hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/archive/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/9dbe99aa82054f8ca676808bb8bfd3cc 2024-12-05T00:28:24,561 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/4920b40441c14fa39e4429db850ddc94 to hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/archive/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/4920b40441c14fa39e4429db850ddc94 2024-12-05T00:28:24,562 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/93190ac88bd946d7b33151d54f7fad16 to hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/archive/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/93190ac88bd946d7b33151d54f7fad16 2024-12-05T00:28:24,563 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/cc0cf24ed217452ab27948dd51379bdf to 
hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/archive/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/cc0cf24ed217452ab27948dd51379bdf 2024-12-05T00:28:24,564 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/4a376549b1b84d94921db1eb92a9a4f4 to hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/archive/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/4a376549b1b84d94921db1eb92a9a4f4 2024-12-05T00:28:24,565 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/e72fe893029a46bebbb7ac805e486548 to hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/archive/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/e72fe893029a46bebbb7ac805e486548 2024-12-05T00:28:24,567 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/aceff16b1e1340b680d3b3d8fc5edd08 to hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/archive/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/aceff16b1e1340b680d3b3d8fc5edd08 2024-12-05T00:28:24,568 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/a3d71e4547fe431980e417c9e4281ed2 to hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/archive/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/a3d71e4547fe431980e417c9e4281ed2 2024-12-05T00:28:24,569 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/146b77e3cd634e01acf2f147e0899d48 to hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/archive/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/146b77e3cd634e01acf2f147e0899d48 2024-12-05T00:28:24,570 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/91cdecbaf61f490893644101621dfa68 to hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/archive/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/91cdecbaf61f490893644101621dfa68 2024-12-05T00:28:24,571 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/a7f45b45ba754ab79636274d88d811d7 to hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/archive/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/a7f45b45ba754ab79636274d88d811d7 2024-12-05T00:28:24,572 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/560822d7f61c4fcfa9026fe971e2b908 to hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/archive/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/560822d7f61c4fcfa9026fe971e2b908 2024-12-05T00:28:24,573 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/67eba21305ff4cb6bbf0dd0137027d89 to hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/archive/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/67eba21305ff4cb6bbf0dd0137027d89 2024-12-05T00:28:24,574 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/895649f1633a4ffeb36e95b2b61068cc to hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/archive/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/895649f1633a4ffeb36e95b2b61068cc 2024-12-05T00:28:24,575 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/b41d4aebf32646f698fb54269d7b41cc to hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/archive/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/b41d4aebf32646f698fb54269d7b41cc 2024-12-05T00:28:24,576 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/0de39528406c4962b34f786fbdc30611 to hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/archive/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/0de39528406c4962b34f786fbdc30611 2024-12-05T00:28:24,577 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/52d5e760882148f9a766657c8b7991a3 to hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/archive/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/info/52d5e760882148f9a766657c8b7991a3 2024-12-05T00:28:24,577 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=2113c16e5528:38761 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-12-05T00:28:24,577 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [c7598cd2c2174aaa942fb5b095e617a6=43081, 9dbe99aa82054f8ca676808bb8bfd3cc=12516, 4920b40441c14fa39e4429db850ddc94=65889, 93190ac88bd946d7b33151d54f7fad16=20078, cc0cf24ed217452ab27948dd51379bdf=17906, 4a376549b1b84d94921db1eb92a9a4f4=94096, e72fe893029a46bebbb7ac805e486548=20078, aceff16b1e1340b680d3b3d8fc5edd08=20078, a3d71e4547fe431980e417c9e4281ed2=116840, 146b77e3cd634e01acf2f147e0899d48=12516, 91cdecbaf61f490893644101621dfa68=20078, a7f45b45ba754ab79636274d88d811d7=147331, 560822d7f61c4fcfa9026fe971e2b908=20078, 67eba21305ff4cb6bbf0dd0137027d89=12521, 895649f1633a4ffeb36e95b2b61068cc=179809, b41d4aebf32646f698fb54269d7b41cc=29807, 0de39528406c4962b34f786fbdc30611=12523, 52d5e760882148f9a766657c8b7991a3=24412] 2024-12-05T00:28:24,581 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/66ee8e24fad26a57122b2ec4f81ae2cf/recovered.edits/335.seqid, newMaxSeqId=335, maxSeqId=126 2024-12-05T00:28:24,581 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf. 2024-12-05T00:28:24,581 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 66ee8e24fad26a57122b2ec4f81ae2cf: Waiting for close lock at 1733358504551Running coprocessor pre-close hooks at 1733358504551Disabling compacts and flushes for region at 1733358504551Disabling writes for close at 1733358504551Writing region close event to WAL at 1733358504578 (+27 ms)Running coprocessor post-close hooks at 1733358504581 (+3 ms)Closed at 1733358504581 2024-12-05T00:28:24,582 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1733358471649.66ee8e24fad26a57122b2ec4f81ae2cf. 2024-12-05T00:28:24,582 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing f5a5551bdbe933a1af0f5b8ffbd7feee, disabling compactions & flushes 2024-12-05T00:28:24,582 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733358471649.f5a5551bdbe933a1af0f5b8ffbd7feee. 2024-12-05T00:28:24,582 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733358471649.f5a5551bdbe933a1af0f5b8ffbd7feee. 2024-12-05T00:28:24,582 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733358471649.f5a5551bdbe933a1af0f5b8ffbd7feee. after waiting 0 ms 2024-12-05T00:28:24,582 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733358471649.f5a5551bdbe933a1af0f5b8ffbd7feee. 
2024-12-05T00:28:24,582 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733358471649.f5a5551bdbe933a1af0f5b8ffbd7feee.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/f5a5551bdbe933a1af0f5b8ffbd7feee/info/f4813b19d3c1461babda1d90a4fc8f74.aac3f55253ff6dfc9aa5bff573ffec89->hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/aac3f55253ff6dfc9aa5bff573ffec89/info/f4813b19d3c1461babda1d90a4fc8f74-bottom] to archive 2024-12-05T00:28:24,583 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733358471649.f5a5551bdbe933a1af0f5b8ffbd7feee.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-05T00:28:24,584 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733358471649.f5a5551bdbe933a1af0f5b8ffbd7feee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/f5a5551bdbe933a1af0f5b8ffbd7feee/info/f4813b19d3c1461babda1d90a4fc8f74.aac3f55253ff6dfc9aa5bff573ffec89 to hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/archive/data/default/TestLogRolling-testLogRolling/f5a5551bdbe933a1af0f5b8ffbd7feee/info/f4813b19d3c1461babda1d90a4fc8f74.aac3f55253ff6dfc9aa5bff573ffec89 2024-12-05T00:28:24,584 WARN [StoreCloser-TestLogRolling-testLogRolling,,1733358471649.f5a5551bdbe933a1af0f5b8ffbd7feee.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-12-05T00:28:24,587 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/data/default/TestLogRolling-testLogRolling/f5a5551bdbe933a1af0f5b8ffbd7feee/recovered.edits/131.seqid, newMaxSeqId=131, maxSeqId=126 2024-12-05T00:28:24,587 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733358471649.f5a5551bdbe933a1af0f5b8ffbd7feee. 2024-12-05T00:28:24,588 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for f5a5551bdbe933a1af0f5b8ffbd7feee: Waiting for close lock at 1733358504582Running coprocessor pre-close hooks at 1733358504582Disabling compacts and flushes for region at 1733358504582Disabling writes for close at 1733358504582Writing region close event to WAL at 1733358504584 (+2 ms)Running coprocessor post-close hooks at 1733358504587 (+3 ms)Closed at 1733358504587 2024-12-05T00:28:24,588 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1733358471649.f5a5551bdbe933a1af0f5b8ffbd7feee. 
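Annotation: the HFileArchiver entries above all follow one pattern — a compacted store file under <root>/data/<namespace>/<table>/<region>/<family>/ is moved, not copied, to the same relative location under <root>/archive/data/. Below is a minimal sketch of that path mapping using only the Hadoop FileSystem API; the root directory, table, region and file names are hypothetical placeholders, and HBase's own HFileArchiver does considerably more (per-file error handling plus the quota report whose failure is logged above).

// Sketch only: mirrors the data/ -> archive/ move visible in the HFileArchiver log entries.
// All paths below are placeholders, not values from a real cluster.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveCompactedFileSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());

    Path root = new Path("/user/jenkins/test-data/ROOT");              // hypothetical HBase root dir
    Path storeFile = new Path(root,
        "data/default/SomeTable/abcdef0123456789/info/0123456789abcdef"); // hypothetical compacted HFile

    // The archive location keeps the same table/region/family/file layout under archive/data/.
    String relative = storeFile.toUri().getPath()
        .substring(new Path(root, "data").toUri().getPath().length() + 1);
    Path archived = new Path(new Path(root, "archive/data"), relative);

    fs.mkdirs(archived.getParent());          // create archive/.../<table>/<region>/<family>/
    if (!fs.rename(storeFile, archived)) {    // move the file; readers now find it only in archive/
      throw new java.io.IOException("Failed to archive " + storeFile);
    }
  }
}

The layout-preserving rename is exactly the correspondence between the source and destination paths printed in the "Archived from FileableStoreFile" entries above.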
2024-12-05T00:28:24,743 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T00:28:24,744 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-05T00:28:24,744 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-05T00:28:24,752 INFO [RS:0;2113c16e5528:45497 {}] regionserver.HRegionServer(976): stopping server 2113c16e5528,45497,1733358456539; all regions closed. 2024-12-05T00:28:24,752 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:24,752 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:24,752 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:24,752 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:24,753 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:24,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741834_1010 (size=8107) 2024-12-05T00:28:24,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741834_1010 (size=8107) 2024-12-05T00:28:24,757 DEBUG [RS:0;2113c16e5528:45497 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/oldWALs 2024-12-05T00:28:24,757 INFO [RS:0;2113c16e5528:45497 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2113c16e5528%2C45497%2C1733358456539.meta:.meta(num 1733358457286) 2024-12-05T00:28:24,757 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:24,757 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:24,757 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:24,757 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:24,757 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:24,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741877_1053 (size=780) 2024-12-05T00:28:24,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741877_1053 (size=780) 2024-12-05T00:28:24,761 DEBUG [RS:0;2113c16e5528:45497 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/oldWALs 2024-12-05T00:28:24,761 INFO [RS:0;2113c16e5528:45497 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2113c16e5528%2C45497%2C1733358456539:(num 1733358504135) 2024-12-05T00:28:24,761 DEBUG [RS:0;2113c16e5528:45497 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:28:24,761 INFO [RS:0;2113c16e5528:45497 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T00:28:24,761 INFO [RS:0;2113c16e5528:45497 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T00:28:24,761 INFO [RS:0;2113c16e5528:45497 {}] hbase.ChoreService(370): Chore service for: regionserver/2113c16e5528:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, 
ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T00:28:24,761 INFO [RS:0;2113c16e5528:45497 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T00:28:24,761 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-05T00:28:24,761 INFO [RS:0;2113c16e5528:45497 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45497 2024-12-05T00:28:24,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45497-0x1018004f92e0001, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2113c16e5528,45497,1733358456539 2024-12-05T00:28:24,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T00:28:24,765 INFO [RS:0;2113c16e5528:45497 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T00:28:24,767 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2113c16e5528,45497,1733358456539] 2024-12-05T00:28:24,767 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2113c16e5528,45497,1733358456539 already deleted, retry=false 2024-12-05T00:28:24,767 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2113c16e5528,45497,1733358456539 expired; onlineServers=0 2024-12-05T00:28:24,768 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '2113c16e5528,38761,1733358456491' ***** 2024-12-05T00:28:24,768 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-05T00:28:24,768 INFO [M:0;2113c16e5528:38761 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T00:28:24,768 INFO [M:0;2113c16e5528:38761 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T00:28:24,768 DEBUG [M:0;2113c16e5528:38761 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-05T00:28:24,768 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
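Annotation: the master learns that the region server is gone through a ZooKeeper watch — deleting the ephemeral /hbase/rs/... znode produces the NodeDeleted event logged above, which RegionServerTracker turns into a server expiration. A minimal sketch of that one-shot watch with the plain ZooKeeper client follows; the connect string and znode name are placeholders, and HBase's tracker adds retry logic and master bookkeeping on top of this.

// Sketch only: one-shot watch on a (hypothetical) ephemeral region-server znode.
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RsZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    String znode = "/hbase/rs/example-server,45497,0";   // hypothetical ephemeral RS znode
    CountDownLatch deleted = new CountDownLatch(1);

    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30000, event -> { });
    Watcher watcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && znode.equals(event.getPath())) {
        deleted.countDown();                             // the server's ephemeral node is gone
      }
    };

    if (zk.exists(znode, watcher) == null) {
      System.out.println(znode + " is already gone");
    } else {
      deleted.await();                                   // exists() watches fire once, on the next change
      System.out.println(znode + " deleted; treat the server as expired");
    }
    zk.close();
  }
}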
2024-12-05T00:28:24,768 DEBUG [M:0;2113c16e5528:38761 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-05T00:28:24,768 DEBUG [master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.small.0-1733358456686 {}] cleaner.HFileCleaner(306): Exit Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.small.0-1733358456686,5,FailOnTimeoutGroup] 2024-12-05T00:28:24,768 DEBUG [master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.large.0-1733358456686 {}] cleaner.HFileCleaner(306): Exit Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.large.0-1733358456686,5,FailOnTimeoutGroup] 2024-12-05T00:28:24,768 INFO [M:0;2113c16e5528:38761 {}] hbase.ChoreService(370): Chore service for: master/2113c16e5528:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-05T00:28:24,768 INFO [M:0;2113c16e5528:38761 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T00:28:24,768 DEBUG [M:0;2113c16e5528:38761 {}] master.HMaster(1795): Stopping service threads 2024-12-05T00:28:24,768 INFO [M:0;2113c16e5528:38761 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-05T00:28:24,768 INFO [M:0;2113c16e5528:38761 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T00:28:24,768 INFO [M:0;2113c16e5528:38761 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-05T00:28:24,769 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-05T00:28:24,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-05T00:28:24,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:28:24,769 DEBUG [M:0;2113c16e5528:38761 {}] zookeeper.ZKUtil(347): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-05T00:28:24,769 WARN [M:0;2113c16e5528:38761 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-05T00:28:24,770 INFO [M:0;2113c16e5528:38761 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/.lastflushedseqids 2024-12-05T00:28:24,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741878_1054 (size=228) 2024-12-05T00:28:24,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741878_1054 (size=228) 2024-12-05T00:28:24,775 INFO [M:0;2113c16e5528:38761 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-05T00:28:24,775 INFO [M:0;2113c16e5528:38761 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-05T00:28:24,775 DEBUG [M:0;2113c16e5528:38761 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T00:28:24,775 INFO [M:0;2113c16e5528:38761 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:28:24,775 DEBUG [M:0;2113c16e5528:38761 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:28:24,775 DEBUG [M:0;2113c16e5528:38761 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T00:28:24,775 DEBUG [M:0;2113c16e5528:38761 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:28:24,775 INFO [M:0;2113c16e5528:38761 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=53.70 KB heapSize=65.92 KB 2024-12-05T00:28:24,786 INFO [regionserver/2113c16e5528:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T00:28:24,790 DEBUG [M:0;2113c16e5528:38761 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9b520606592247898d9dd4af52f01625 is 82, key is hbase:meta,,1/info:regioninfo/1733358457312/Put/seqid=0 2024-12-05T00:28:24,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741879_1055 (size=5672) 2024-12-05T00:28:24,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741879_1055 (size=5672) 2024-12-05T00:28:24,798 INFO [M:0;2113c16e5528:38761 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9b520606592247898d9dd4af52f01625 2024-12-05T00:28:24,816 DEBUG [M:0;2113c16e5528:38761 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3410e5b174b8498682541ecd8cbe5041 is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733358457730/Put/seqid=0 2024-12-05T00:28:24,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741880_1056 (size=7680) 2024-12-05T00:28:24,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741880_1056 (size=7680) 2024-12-05T00:28:24,821 INFO [M:0;2113c16e5528:38761 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.09 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3410e5b174b8498682541ecd8cbe5041 2024-12-05T00:28:24,825 INFO [M:0;2113c16e5528:38761 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3410e5b174b8498682541ecd8cbe5041 2024-12-05T00:28:24,839 DEBUG [M:0;2113c16e5528:38761 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c3e41f0bb6044fe1aacbb6a30de8112a is 69, key is 2113c16e5528,45497,1733358456539/rs:state/1733358456775/Put/seqid=0 2024-12-05T00:28:24,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741881_1057 (size=5156) 2024-12-05T00:28:24,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741881_1057 (size=5156) 2024-12-05T00:28:24,844 INFO [M:0;2113c16e5528:38761 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c3e41f0bb6044fe1aacbb6a30de8112a 2024-12-05T00:28:24,862 DEBUG [M:0;2113c16e5528:38761 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/02961513814f4126943c3845a9737c1d is 52, key is load_balancer_on/state:d/1733358457363/Put/seqid=0 2024-12-05T00:28:24,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741882_1058 (size=5056) 2024-12-05T00:28:24,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741882_1058 (size=5056) 2024-12-05T00:28:24,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45497-0x1018004f92e0001, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:28:24,867 INFO [RS:0;2113c16e5528:45497 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T00:28:24,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45497-0x1018004f92e0001, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:28:24,867 INFO [RS:0;2113c16e5528:45497 {}] regionserver.HRegionServer(1031): Exiting; stopping=2113c16e5528,45497,1733358456539; zookeeper connection closed. 
2024-12-05T00:28:24,867 INFO [M:0;2113c16e5528:38761 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/02961513814f4126943c3845a9737c1d 2024-12-05T00:28:24,867 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3668ab6b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3668ab6b 2024-12-05T00:28:24,867 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-05T00:28:24,871 DEBUG [M:0;2113c16e5528:38761 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9b520606592247898d9dd4af52f01625 as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9b520606592247898d9dd4af52f01625 2024-12-05T00:28:24,875 INFO [M:0;2113c16e5528:38761 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9b520606592247898d9dd4af52f01625, entries=8, sequenceid=129, filesize=5.5 K 2024-12-05T00:28:24,876 DEBUG [M:0;2113c16e5528:38761 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3410e5b174b8498682541ecd8cbe5041 as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3410e5b174b8498682541ecd8cbe5041 2024-12-05T00:28:24,879 INFO [M:0;2113c16e5528:38761 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3410e5b174b8498682541ecd8cbe5041 2024-12-05T00:28:24,879 INFO [M:0;2113c16e5528:38761 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3410e5b174b8498682541ecd8cbe5041, entries=14, sequenceid=129, filesize=7.5 K 2024-12-05T00:28:24,880 DEBUG [M:0;2113c16e5528:38761 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c3e41f0bb6044fe1aacbb6a30de8112a as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c3e41f0bb6044fe1aacbb6a30de8112a 2024-12-05T00:28:24,884 INFO [M:0;2113c16e5528:38761 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c3e41f0bb6044fe1aacbb6a30de8112a, entries=1, sequenceid=129, filesize=5.0 K 2024-12-05T00:28:24,884 DEBUG [M:0;2113c16e5528:38761 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/02961513814f4126943c3845a9737c1d as hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/02961513814f4126943c3845a9737c1d 2024-12-05T00:28:24,888 INFO [M:0;2113c16e5528:38761 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45045/user/jenkins/test-data/8373e16f-1f7f-ffff-187f-7811ed263f30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/02961513814f4126943c3845a9737c1d, entries=1, sequenceid=129, filesize=4.9 K 2024-12-05T00:28:24,889 INFO [M:0;2113c16e5528:38761 {}] regionserver.HRegion(3140): Finished flush of dataSize ~53.70 KB/54985, heapSize ~65.86 KB/67440, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 114ms, sequenceid=129, compaction requested=false 2024-12-05T00:28:24,890 INFO [M:0;2113c16e5528:38761 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:28:24,890 DEBUG [M:0;2113c16e5528:38761 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733358504775Disabling compacts and flushes for region at 1733358504775Disabling writes for close at 1733358504775Obtaining lock to block concurrent updates at 1733358504775Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733358504775Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=54985, getHeapSize=67440, getOffHeapSize=0, getCellsCount=152 at 1733358504776 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733358504776Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733358504776Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733358504790 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733358504790Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733358504802 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733358504815 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733358504815Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733358504825 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733358504838 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733358504838Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733358504848 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733358504862 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733358504862Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@57a62c30: reopening flushed file at 1733358504871 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@563f89ce: reopening flushed file at 1733358504875 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4c053b8b: reopening flushed file at 1733358504879 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@11d980a: reopening flushed file at 1733358504884 (+5 ms)Finished flush of dataSize ~53.70 KB/54985, heapSize ~65.86 KB/67440, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 114ms, sequenceid=129, compaction requested=false at 1733358504889 (+5 ms)Writing region close event to WAL at 1733358504890 (+1 ms)Closed at 1733358504890 2024-12-05T00:28:24,890 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:24,891 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:24,891 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:24,891 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:24,891 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:24,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36047 is added to blk_1073741830_1006 (size=63915) 2024-12-05T00:28:24,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44469 is added to blk_1073741830_1006 (size=63915) 2024-12-05T00:28:24,893 INFO [M:0;2113c16e5528:38761 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-05T00:28:24,893 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
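Annotation: the master-store flush above writes each column family's snapshot to a file under .tmp/<family>/ and then "commits" it by renaming it into the family directory, so readers only ever see completely written files. A minimal sketch of that write-then-rename commit with the Hadoop FileSystem API is below; the region directory and file name are hypothetical, and the real flush additionally tracks sequence ids, bloom filters and store metrics.

// Sketch only: the .tmp/ write followed by a rename into the store directory,
// as in the "Committing ... .tmp/info/... as .../info/..." entries above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FlushCommitSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path regionDir = new Path("/hbase-root/MasterData/data/master/store/region"); // hypothetical
    String family = "info";
    String fileName = "flush-0001";                                               // hypothetical

    Path tmpFile = new Path(regionDir, ".tmp/" + family + "/" + fileName);
    Path committed = new Path(regionDir, family + "/" + fileName);

    fs.mkdirs(tmpFile.getParent());
    try (FSDataOutputStream out = fs.create(tmpFile)) {   // step 1: write the snapshot under .tmp/
      out.writeBytes("flushed cells would go here");
    }

    fs.mkdirs(committed.getParent());
    if (!fs.rename(tmpFile, committed)) {                 // step 2: commit by rename into <family>/
      throw new java.io.IOException("Failed to commit flush file " + tmpFile);
    }
  }
}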
2024-12-05T00:28:24,893 INFO [M:0;2113c16e5528:38761 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38761 2024-12-05T00:28:24,894 INFO [M:0;2113c16e5528:38761 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T00:28:24,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:28:24,997 INFO [M:0;2113c16e5528:38761 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T00:28:24,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38761-0x1018004f92e0000, quorum=127.0.0.1:58134, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:28:25,000 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@27cee48d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:28:25,000 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7b29c022{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:28:25,000 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:28:25,000 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@314e7370{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:28:25,000 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@53298b3d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/hadoop.log.dir/,STOPPED} 2024-12-05T00:28:25,002 WARN [BP-709715538-172.17.0.2-1733358455829 heartbeating to localhost/127.0.0.1:45045 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T00:28:25,002 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T00:28:25,002 WARN [BP-709715538-172.17.0.2-1733358455829 heartbeating to localhost/127.0.0.1:45045 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-709715538-172.17.0.2-1733358455829 (Datanode Uuid 68280e14-3a57-48ad-b823-905d84844bb5) service to localhost/127.0.0.1:45045 2024-12-05T00:28:25,002 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T00:28:25,003 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/cluster_938ee5fe-d9d1-29e3-faad-cab85c93ca9a/data/data3/current/BP-709715538-172.17.0.2-1733358455829 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:28:25,003 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/cluster_938ee5fe-d9d1-29e3-faad-cab85c93ca9a/data/data4/current/BP-709715538-172.17.0.2-1733358455829 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:28:25,003 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T00:28:25,005 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1091e18a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:28:25,005 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3221a4aa{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:28:25,005 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:28:25,005 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d639fc0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:28:25,005 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fc50460{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/hadoop.log.dir/,STOPPED} 2024-12-05T00:28:25,007 WARN [BP-709715538-172.17.0.2-1733358455829 heartbeating to localhost/127.0.0.1:45045 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T00:28:25,007 WARN [BP-709715538-172.17.0.2-1733358455829 heartbeating to localhost/127.0.0.1:45045 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-709715538-172.17.0.2-1733358455829 (Datanode Uuid 922058ce-75e1-4506-a65a-f8311b2cfa1d) service to localhost/127.0.0.1:45045 2024-12-05T00:28:25,007 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T00:28:25,007 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T00:28:25,007 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/cluster_938ee5fe-d9d1-29e3-faad-cab85c93ca9a/data/data1/current/BP-709715538-172.17.0.2-1733358455829 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:28:25,008 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/cluster_938ee5fe-d9d1-29e3-faad-cab85c93ca9a/data/data2/current/BP-709715538-172.17.0.2-1733358455829 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:28:25,008 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T00:28:25,014 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@188ddc10{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-05T00:28:25,014 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5fc37f93{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:28:25,014 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:28:25,014 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@240fc28c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:28:25,014 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59703725{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/hadoop.log.dir/,STOPPED} 2024-12-05T00:28:25,021 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-05T00:28:25,047 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-05T00:28:25,056 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=230 (was 206) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45045 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45045 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45045 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45045 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45045 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45045 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45045 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45045 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:45045 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=509 (was 485) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=19 (was 45), ProcessCount=11 (was 11), AvailableMemoryMB=8600 (was 8649) 2024-12-05T00:28:25,064 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=230, OpenFileDescriptor=509, MaxFileDescriptor=1048576, SystemLoadAverage=19, ProcessCount=11, AvailableMemoryMB=8601 2024-12-05T00:28:25,064 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-05T00:28:25,064 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/hadoop.log.dir so I do NOT create it in target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c 2024-12-05T00:28:25,064 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c4dcb673-d54e-1ea6-383b-99332c3b6cc8/hadoop.tmp.dir so I do NOT create it in target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c 2024-12-05T00:28:25,064 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/cluster_ede70a14-c4b0-f197-fc70-ae7362d8b6c6, deleteOnExit=true 2024-12-05T00:28:25,064 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-05T00:28:25,064 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/test.cache.data in system properties and HBase conf 2024-12-05T00:28:25,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/hadoop.tmp.dir in system properties and HBase conf 2024-12-05T00:28:25,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/hadoop.log.dir in system properties and HBase conf 2024-12-05T00:28:25,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-05T00:28:25,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-05T00:28:25,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-05T00:28:25,065 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-12-05T00:28:25,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-05T00:28:25,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-05T00:28:25,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-05T00:28:25,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T00:28:25,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-05T00:28:25,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-05T00:28:25,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T00:28:25,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T00:28:25,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-05T00:28:25,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/nfs.dump.dir in system properties and HBase conf 2024-12-05T00:28:25,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/java.io.tmpdir in system properties and HBase conf 2024-12-05T00:28:25,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T00:28:25,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-05T00:28:25,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-05T00:28:25,078 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-05T00:28:25,138 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:28:25,141 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:28:25,142 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:28:25,142 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:28:25,142 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-05T00:28:25,143 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:28:25,143 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3317cc02{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:28:25,144 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5a248f23{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:28:25,256 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7c411d10{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/java.io.tmpdir/jetty-localhost-46535-hadoop-hdfs-3_4_1-tests_jar-_-any-1059510444136367032/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-05T00:28:25,257 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a06f288{HTTP/1.1, (http/1.1)}{localhost:46535} 2024-12-05T00:28:25,257 INFO [Time-limited test {}] server.Server(415): Started @285884ms 2024-12-05T00:28:25,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:25,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:25,270 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-05T00:28:25,318 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:28:25,320 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:28:25,321 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:28:25,321 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:28:25,321 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T00:28:25,322 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a21ffab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:28:25,322 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@665d17bc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:28:25,436 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@a6bcfcb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/java.io.tmpdir/jetty-localhost-43297-hadoop-hdfs-3_4_1-tests_jar-_-any-14939391138433158889/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:28:25,436 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@60c41d21{HTTP/1.1, (http/1.1)}{localhost:43297} 2024-12-05T00:28:25,436 INFO [Time-limited test {}] server.Server(415): Started @286063ms 2024-12-05T00:28:25,437 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T00:28:25,467 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:28:25,469 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:28:25,470 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:28:25,470 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:28:25,470 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-05T00:28:25,471 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4d7e0513{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:28:25,471 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2191d18b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:28:25,535 WARN [Thread-2468 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/cluster_ede70a14-c4b0-f197-fc70-ae7362d8b6c6/data/data2/current/BP-805312939-172.17.0.2-1733358505084/current, will proceed with Du for space computation calculation, 2024-12-05T00:28:25,535 WARN [Thread-2467 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/cluster_ede70a14-c4b0-f197-fc70-ae7362d8b6c6/data/data1/current/BP-805312939-172.17.0.2-1733358505084/current, will proceed with Du for space computation calculation, 2024-12-05T00:28:25,553 WARN [Thread-2446 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-05T00:28:25,555 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x51006cdcdd2c904d with lease ID 0x61d27e5e14c9e877: Processing first storage report for DS-6af5e674-2830-4b59-bb9a-f32c5ca7b460 from datanode DatanodeRegistration(127.0.0.1:36519, datanodeUuid=41c51d21-00bb-489d-b261-f37af0c529db, infoPort=33171, infoSecurePort=0, ipcPort=45955, storageInfo=lv=-57;cid=testClusterID;nsid=86233965;c=1733358505084) 2024-12-05T00:28:25,555 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x51006cdcdd2c904d with lease ID 0x61d27e5e14c9e877: from storage DS-6af5e674-2830-4b59-bb9a-f32c5ca7b460 node DatanodeRegistration(127.0.0.1:36519, datanodeUuid=41c51d21-00bb-489d-b261-f37af0c529db, infoPort=33171, infoSecurePort=0, ipcPort=45955, storageInfo=lv=-57;cid=testClusterID;nsid=86233965;c=1733358505084), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:28:25,555 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x51006cdcdd2c904d with lease ID 0x61d27e5e14c9e877: Processing first storage report for DS-6781a247-bb7c-4fde-9318-40860b2a3844 from datanode DatanodeRegistration(127.0.0.1:36519, datanodeUuid=41c51d21-00bb-489d-b261-f37af0c529db, infoPort=33171, infoSecurePort=0, ipcPort=45955, storageInfo=lv=-57;cid=testClusterID;nsid=86233965;c=1733358505084) 2024-12-05T00:28:25,555 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x51006cdcdd2c904d with lease ID 0x61d27e5e14c9e877: from storage DS-6781a247-bb7c-4fde-9318-40860b2a3844 node DatanodeRegistration(127.0.0.1:36519, datanodeUuid=41c51d21-00bb-489d-b261-f37af0c529db, infoPort=33171, infoSecurePort=0, ipcPort=45955, storageInfo=lv=-57;cid=testClusterID;nsid=86233965;c=1733358505084), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:28:25,586 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2d0ad989{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/java.io.tmpdir/jetty-localhost-42815-hadoop-hdfs-3_4_1-tests_jar-_-any-15605286221561050540/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:28:25,586 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7e58a9be{HTTP/1.1, (http/1.1)}{localhost:42815} 2024-12-05T00:28:25,586 INFO [Time-limited test {}] server.Server(415): Started @286213ms 2024-12-05T00:28:25,587 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
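The startup records beginning at the "before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten" line request a cluster of 1 master, 1 region server, 2 datanodes and 1 ZooKeeper server (the StartMiniClusterOption printed there). The sketch below shows one plausible way to ask HBaseTestingUtil for that shape; the builder method names are assumptions based on the option's toString output above, not code copied from the test source.

// Hedged sketch: requesting the cluster shape logged above.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterStartupSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)        // one HMaster
        .numRegionServers(1)  // one region server
        .numDataNodes(2)      // two HDFS datanodes
        .numZkServers(1)      // one MiniZK server
        .build();
    // Brings up MiniZK, a mini DFS and the HBase master/region server, producing
    // the "STARTING DFS" / Jetty / block-report records seen in this section.
    util.startMiniCluster(option);
    util.shutdownMiniCluster();
  }
}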
2024-12-05T00:28:25,685 WARN [Thread-2493 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/cluster_ede70a14-c4b0-f197-fc70-ae7362d8b6c6/data/data3/current/BP-805312939-172.17.0.2-1733358505084/current, will proceed with Du for space computation calculation, 2024-12-05T00:28:25,685 WARN [Thread-2494 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/cluster_ede70a14-c4b0-f197-fc70-ae7362d8b6c6/data/data4/current/BP-805312939-172.17.0.2-1733358505084/current, will proceed with Du for space computation calculation, 2024-12-05T00:28:25,701 WARN [Thread-2482 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-05T00:28:25,703 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbfe3703147effd3e with lease ID 0x61d27e5e14c9e878: Processing first storage report for DS-81058d37-815c-40da-8275-fe7692b99377 from datanode DatanodeRegistration(127.0.0.1:44693, datanodeUuid=a259d140-b6d8-4442-af8d-c84d828241af, infoPort=34223, infoSecurePort=0, ipcPort=42161, storageInfo=lv=-57;cid=testClusterID;nsid=86233965;c=1733358505084) 2024-12-05T00:28:25,703 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbfe3703147effd3e with lease ID 0x61d27e5e14c9e878: from storage DS-81058d37-815c-40da-8275-fe7692b99377 node DatanodeRegistration(127.0.0.1:44693, datanodeUuid=a259d140-b6d8-4442-af8d-c84d828241af, infoPort=34223, infoSecurePort=0, ipcPort=42161, storageInfo=lv=-57;cid=testClusterID;nsid=86233965;c=1733358505084), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:28:25,703 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbfe3703147effd3e with lease ID 0x61d27e5e14c9e878: Processing first storage report for DS-4cfc8a57-54d7-4363-b813-d3a7baf9e60c from datanode DatanodeRegistration(127.0.0.1:44693, datanodeUuid=a259d140-b6d8-4442-af8d-c84d828241af, infoPort=34223, infoSecurePort=0, ipcPort=42161, storageInfo=lv=-57;cid=testClusterID;nsid=86233965;c=1733358505084) 2024-12-05T00:28:25,703 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbfe3703147effd3e with lease ID 0x61d27e5e14c9e878: from storage DS-4cfc8a57-54d7-4363-b813-d3a7baf9e60c node DatanodeRegistration(127.0.0.1:44693, datanodeUuid=a259d140-b6d8-4442-af8d-c84d828241af, infoPort=34223, infoSecurePort=0, ipcPort=42161, storageInfo=lv=-57;cid=testClusterID;nsid=86233965;c=1733358505084), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:28:25,708 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c 2024-12-05T00:28:25,711 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/cluster_ede70a14-c4b0-f197-fc70-ae7362d8b6c6/zookeeper_0, clientPort=60225, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/cluster_ede70a14-c4b0-f197-fc70-ae7362d8b6c6/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/cluster_ede70a14-c4b0-f197-fc70-ae7362d8b6c6/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-05T00:28:25,711 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60225 2024-12-05T00:28:25,712 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:28:25,713 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:28:25,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44693 is added to blk_1073741825_1001 (size=7) 2024-12-05T00:28:25,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741825_1001 (size=7) 2024-12-05T00:28:25,722 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821 with version=8 2024-12-05T00:28:25,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45273/user/jenkins/test-data/7f70659c-3a14-bb99-79d3-4868f5326e37/hbase-staging 2024-12-05T00:28:25,724 INFO [Time-limited test {}] client.ConnectionUtils(128): master/2113c16e5528:0 server-side Connection retries=45 2024-12-05T00:28:25,724 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:28:25,724 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T00:28:25,724 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T00:28:25,724 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:28:25,724 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T00:28:25,724 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-05T00:28:25,724 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T00:28:25,725 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40407 2024-12-05T00:28:25,726 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40407 connecting to ZooKeeper ensemble=127.0.0.1:60225 2024-12-05T00:28:25,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:404070x0, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T00:28:25,733 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40407-0x1018005b97e0000 connected 2024-12-05T00:28:25,751 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:28:25,753 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:28:25,754 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:28:25,755 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821, hbase.cluster.distributed=false 2024-12-05T00:28:25,756 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T00:28:25,757 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40407 2024-12-05T00:28:25,757 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40407 2024-12-05T00:28:25,757 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40407 2024-12-05T00:28:25,757 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40407 2024-12-05T00:28:25,758 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40407 2024-12-05T00:28:25,772 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2113c16e5528:0 server-side Connection retries=45 2024-12-05T00:28:25,772 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:28:25,772 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T00:28:25,772 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T00:28:25,772 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:28:25,773 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T00:28:25,773 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T00:28:25,773 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T00:28:25,773 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40355 2024-12-05T00:28:25,774 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40355 connecting to ZooKeeper ensemble=127.0.0.1:60225 2024-12-05T00:28:25,775 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:28:25,776 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:28:25,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:403550x0, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T00:28:25,780 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:403550x0, quorum=127.0.0.1:60225, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:28:25,780 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40355-0x1018005b97e0001 connected 2024-12-05T00:28:25,780 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-05T00:28:25,781 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T00:28:25,782 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40355-0x1018005b97e0001, quorum=127.0.0.1:60225, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T00:28:25,782 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40355-0x1018005b97e0001, quorum=127.0.0.1:60225, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T00:28:25,783 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40355 2024-12-05T00:28:25,783 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40355 2024-12-05T00:28:25,783 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40355 2024-12-05T00:28:25,783 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40355 2024-12-05T00:28:25,784 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40355 2024-12-05T00:28:25,795 
DEBUG [M:0;2113c16e5528:40407 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;2113c16e5528:40407 2024-12-05T00:28:25,795 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/2113c16e5528,40407,1733358505724 2024-12-05T00:28:25,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:28:25,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x1018005b97e0001, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:28:25,797 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/2113c16e5528,40407,1733358505724 2024-12-05T00:28:25,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x1018005b97e0001, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T00:28:25,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:28:25,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x1018005b97e0001, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:28:25,800 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-05T00:28:25,801 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/2113c16e5528,40407,1733358505724 from backup master directory 2024-12-05T00:28:25,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x1018005b97e0001, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:28:25,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/2113c16e5528,40407,1733358505724 2024-12-05T00:28:25,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:28:25,802 WARN [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
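The ZKWatcher/ZKUtil records above show the master and region server connecting to the ZooKeeper ensemble at 127.0.0.1:60225 and registering watches on znodes such as /hbase/master and /hbase/backup-masters, then reacting to NodeCreated/NodeDeleted/NodeChildrenChanged events. HBase does this through its own ZKWatcher and ZKUtil wrappers; the stand-alone sketch below uses the plain Apache ZooKeeper client to illustrate the same watch-then-react pattern. The quorum string is taken from the log; the class name and timeout are invented for the example.

// Illustrative only -- plain ZooKeeper client, not HBase's ZKWatcher/ZKUtil.
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class MasterZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Quorum string copied from the log; a 30s session timeout is an arbitrary choice.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:60225", 30_000, new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        // Fired for events like NodeCreated on /hbase/master or NodeDeleted under
        // /hbase/backup-masters, mirroring the "Received ZooKeeper Event" lines above.
        System.out.println(event.getType() + " on " + event.getPath());
      }
    });
    // exists() with watch=true registers the watcher even if the znode is absent,
    // which is the "Set watcher on znode that does not yet exist" case in the log.
    zk.exists("/hbase/master", true);
    Thread.sleep(60_000); // keep the process alive long enough to observe events (sketch only)
    zk.close();
  }
}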
2024-12-05T00:28:25,802 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=2113c16e5528,40407,1733358505724 2024-12-05T00:28:25,806 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/hbase.id] with ID: 09e4cc77-9f12-499e-913b-f792248fa803 2024-12-05T00:28:25,806 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/.tmp/hbase.id 2024-12-05T00:28:25,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44693 is added to blk_1073741826_1002 (size=42) 2024-12-05T00:28:25,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741826_1002 (size=42) 2024-12-05T00:28:25,812 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/.tmp/hbase.id]:[hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/hbase.id] 2024-12-05T00:28:25,823 INFO [master/2113c16e5528:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:28:25,823 INFO [master/2113c16e5528:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-05T00:28:25,824 INFO [master/2113c16e5528:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
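The two util.FSUtils entries above describe how the cluster ID is published: the file is first written to a temporary location under .tmp and then moved onto its final hbase.id path. The sketch below reproduces that write-then-rename pattern with the plain Hadoop FileSystem API; the class and paths are placeholders for illustration, not the actual FSUtils code.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WriteThenRenameSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/hbase-root/.tmp/hbase.id"); // placeholder paths
        Path dst = new Path("/hbase-root/hbase.id");
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          // Write the full content before the file becomes visible under its final name.
          out.write("09e4cc77-9f12-499e-913b-f792248fa803".getBytes(StandardCharsets.UTF_8));
        }
        if (!fs.rename(tmp, dst)) {
          throw new IOException("rename failed: " + tmp + " -> " + dst);
        }
      }
    }

The point of the pattern is that readers either see no file or a complete one, never a partial write.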
2024-12-05T00:28:25,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:28:25,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x1018005b97e0001, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:28:25,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44693 is added to blk_1073741827_1003 (size=196) 2024-12-05T00:28:25,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741827_1003 (size=196) 2024-12-05T00:28:25,838 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T00:28:25,838 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-05T00:28:25,839 INFO [master/2113c16e5528:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T00:28:25,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44693 is added to blk_1073741828_1004 (size=1189) 2024-12-05T00:28:25,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741828_1004 (size=1189) 2024-12-05T00:28:25,847 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/MasterData/data/master/store 2024-12-05T00:28:25,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44693 is added to blk_1073741829_1005 (size=34) 2024-12-05T00:28:25,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741829_1005 (size=34) 2024-12-05T00:28:25,853 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:28:25,853 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T00:28:25,853 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:28:25,853 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:28:25,853 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T00:28:25,854 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:28:25,854 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
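The master:store descriptor dumped above is assembled internally by the master, but the per-family attributes it lists (VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING, BLOCKSIZE, IN_MEMORY) are the same ones exposed through the public descriptor builders. A minimal sketch, for an ordinary user table rather than master:store, mirroring the 'info' family settings shown in the log; the table name is a placeholder:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static TableDescriptor exampleDescriptor() {
        // Mirrors the 'info' family above: 3 versions, ROWCOL bloom filter,
        // ROW_INDEX_V1 block encoding, 8 KB blocks, in-memory priority.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8192)
            .setInMemory(true)
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example_table")) // placeholder name
            .setColumnFamily(info)
            .build();
      }
    }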
2024-12-05T00:28:25,854 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733358505853Disabling compacts and flushes for region at 1733358505853Disabling writes for close at 1733358505853Writing region close event to WAL at 1733358505854 (+1 ms)Closed at 1733358505854 2024-12-05T00:28:25,854 WARN [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/MasterData/data/master/store/.initializing 2024-12-05T00:28:25,854 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/MasterData/WALs/2113c16e5528,40407,1733358505724 2024-12-05T00:28:25,857 INFO [master/2113c16e5528:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2113c16e5528%2C40407%2C1733358505724, suffix=, logDir=hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/MasterData/WALs/2113c16e5528,40407,1733358505724, archiveDir=hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/MasterData/oldWALs, maxLogs=10 2024-12-05T00:28:25,858 INFO [master/2113c16e5528:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C40407%2C1733358505724.1733358505857 2024-12-05T00:28:25,866 INFO [master/2113c16e5528:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/MasterData/WALs/2113c16e5528,40407,1733358505724/2113c16e5528%2C40407%2C1733358505724.1733358505857 2024-12-05T00:28:25,868 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33171:33171),(127.0.0.1/127.0.0.1:34223:34223)] 2024-12-05T00:28:25,871 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-05T00:28:25,871 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:28:25,871 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:28:25,871 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:28:25,872 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:28:25,874 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-05T00:28:25,874 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:28:25,874 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:28:25,874 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:28:25,875 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-05T00:28:25,875 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:28:25,875 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:28:25,875 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:28:25,876 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-05T00:28:25,877 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:28:25,877 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:28:25,877 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:28:25,878 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-05T00:28:25,878 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:28:25,878 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:28:25,879 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:28:25,879 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:28:25,879 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:28:25,881 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:28:25,881 DEBUG [master/2113c16e5528:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:28:25,881 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T00:28:25,882 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:28:25,884 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T00:28:25,884 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=816327, jitterRate=0.038014501333236694}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T00:28:25,885 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733358505871Initializing all the Stores at 1733358505872 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358505872Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358505872Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358505872Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358505872Cleaning up temporary data from old regions at 1733358505881 (+9 ms)Region opened successfully at 1733358505885 (+4 ms) 2024-12-05T00:28:25,885 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-05T00:28:25,889 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ce22a25, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2113c16e5528/172.17.0.2:0 2024-12-05T00:28:25,890 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-05T00:28:25,890 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-05T00:28:25,890 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-05T00:28:25,890 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-05T00:28:25,891 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-05T00:28:25,891 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-05T00:28:25,891 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-05T00:28:25,893 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-05T00:28:25,894 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-05T00:28:25,895 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-05T00:28:25,895 INFO [master/2113c16e5528:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-05T00:28:25,896 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-05T00:28:25,899 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-05T00:28:25,899 INFO [master/2113c16e5528:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-05T00:28:25,900 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-05T00:28:25,901 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-05T00:28:25,901 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-05T00:28:25,902 DEBUG 
[master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-05T00:28:25,904 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-05T00:28:25,905 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-05T00:28:25,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x1018005b97e0001, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T00:28:25,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T00:28:25,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x1018005b97e0001, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:28:25,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:28:25,909 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=2113c16e5528,40407,1733358505724, sessionid=0x1018005b97e0000, setting cluster-up flag (Was=false) 2024-12-05T00:28:25,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:28:25,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x1018005b97e0001, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:28:25,916 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-05T00:28:25,917 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2113c16e5528,40407,1733358505724 2024-12-05T00:28:25,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x1018005b97e0001, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:28:25,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:28:25,927 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-05T00:28:25,928 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2113c16e5528,40407,1733358505724 2024-12-05T00:28:25,929 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-05T00:28:25,931 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-05T00:28:25,931 INFO [master/2113c16e5528:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-05T00:28:25,931 INFO [master/2113c16e5528:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-05T00:28:25,931 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 2113c16e5528,40407,1733358505724 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-05T00:28:25,933 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:28:25,933 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:28:25,933 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:28:25,933 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:28:25,933 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/2113c16e5528:0, corePoolSize=10, maxPoolSize=10 2024-12-05T00:28:25,933 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:28:25,933 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/2113c16e5528:0, corePoolSize=2, maxPoolSize=2 2024-12-05T00:28:25,933 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/2113c16e5528:0, corePoolSize=1, 
maxPoolSize=1 2024-12-05T00:28:25,934 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:28:25,935 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-05T00:28:25,935 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:28:25,936 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-05T00:28:25,939 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733358535939 2024-12-05T00:28:25,939 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-05T00:28:25,939 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-05T00:28:25,939 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-05T00:28:25,939 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-05T00:28:25,939 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-05T00:28:25,939 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-05T00:28:25,940 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T00:28:25,940 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-05T00:28:25,940 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-05T00:28:25,941 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-05T00:28:25,941 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-05T00:28:25,941 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-05T00:28:25,941 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.large.0-1733358505941,5,FailOnTimeoutGroup] 2024-12-05T00:28:25,941 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.small.0-1733358505941,5,FailOnTimeoutGroup] 2024-12-05T00:28:25,941 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T00:28:25,941 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-05T00:28:25,941 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-05T00:28:25,941 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
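Each "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" line above corresponds to a periodic task registered with the master's ChoreService. ScheduledChore and ChoreService are internal (audience-private) classes and their signatures may differ between versions, so the following is only a rough sketch of that name/period/unit pattern, not how the master actually wires its cleaners:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // Same shape as "name=LogsCleaner, period=600000, unit=MILLISECONDS" above.
        ScheduledChore chore = new ScheduledChore("ExampleCleaner", stopper, 600000) {
          @Override protected void chore() {
            // periodic cleanup work would run here
          }
        };
        ChoreService service = new ChoreService("example");
        service.scheduleChore(chore);
        TimeUnit.SECONDS.sleep(2); // give the service a moment, then shut everything down
        stopper.stop("done");
        service.shutdown();
      }
    }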
2024-12-05T00:28:25,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44693 is added to blk_1073741831_1007 (size=1321) 2024-12-05T00:28:25,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741831_1007 (size=1321) 2024-12-05T00:28:25,948 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-05T00:28:25,949 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821 2024-12-05T00:28:25,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741832_1008 (size=32) 2024-12-05T00:28:25,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44693 is added to blk_1073741832_1008 (size=32) 2024-12-05T00:28:25,960 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:28:25,962 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T00:28:25,963 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T00:28:25,963 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:28:25,963 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:28:25,963 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T00:28:25,964 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T00:28:25,964 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:28:25,965 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:28:25,965 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T00:28:25,966 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T00:28:25,966 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:28:25,966 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:28:25,966 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T00:28:25,967 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T00:28:25,967 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:28:25,967 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:28:25,967 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T00:28:25,968 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/data/hbase/meta/1588230740 2024-12-05T00:28:25,968 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/data/hbase/meta/1588230740 2024-12-05T00:28:25,969 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T00:28:25,969 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T00:28:25,970 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
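The compactions.CompactionConfiguration lines above (minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0) are printed from per-store settings derived from a handful of tuning keys. A sketch of those knobs follows; the property names are assumed from the standard configuration and should be checked against this 3.0.0-beta-2-SNAPSHOT build rather than taken as confirmed:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static void main(String[] args) {
        // Property names assumed from hbase-default.xml; verify for the running version.
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);               // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);         // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // off-peak ratio
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
        System.out.println("ratio = " + conf.getFloat("hbase.hstore.compaction.ratio", -1f));
      }
    }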
2024-12-05T00:28:25,971 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T00:28:25,972 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T00:28:25,973 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=689489, jitterRate=-0.12326960265636444}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-05T00:28:25,973 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733358505961Initializing all the Stores at 1733358505961Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358505961Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358505961Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358505961Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358505961Cleaning up temporary data from old regions at 1733358505969 (+8 ms)Region opened successfully at 1733358505973 (+4 ms) 2024-12-05T00:28:25,973 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T00:28:25,973 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T00:28:25,974 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T00:28:25,974 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T00:28:25,974 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T00:28:25,974 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T00:28:25,974 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733358505973Disabling compacts and flushes for region at 1733358505973Disabling writes for close at 1733358505974 (+1 ms)Writing region close 
event to WAL at 1733358505974Closed at 1733358505974 2024-12-05T00:28:25,975 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:28:25,975 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-05T00:28:25,975 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-05T00:28:25,976 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T00:28:25,977 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-05T00:28:25,985 INFO [RS:0;2113c16e5528:40355 {}] regionserver.HRegionServer(746): ClusterId : 09e4cc77-9f12-499e-913b-f792248fa803 2024-12-05T00:28:25,985 DEBUG [RS:0;2113c16e5528:40355 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T00:28:25,987 DEBUG [RS:0;2113c16e5528:40355 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T00:28:25,987 DEBUG [RS:0;2113c16e5528:40355 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T00:28:25,990 DEBUG [RS:0;2113c16e5528:40355 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T00:28:25,990 DEBUG [RS:0;2113c16e5528:40355 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f371e57, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2113c16e5528/172.17.0.2:0 2024-12-05T00:28:26,002 DEBUG [RS:0;2113c16e5528:40355 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;2113c16e5528:40355 2024-12-05T00:28:26,002 INFO [RS:0;2113c16e5528:40355 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T00:28:26,002 INFO [RS:0;2113c16e5528:40355 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T00:28:26,002 DEBUG [RS:0;2113c16e5528:40355 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-05T00:28:26,003 INFO [RS:0;2113c16e5528:40355 {}] regionserver.HRegionServer(2659): reportForDuty to master=2113c16e5528,40407,1733358505724 with port=40355, startcode=1733358505772 2024-12-05T00:28:26,003 DEBUG [RS:0;2113c16e5528:40355 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T00:28:26,005 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38763, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T00:28:26,006 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40407 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2113c16e5528,40355,1733358505772 2024-12-05T00:28:26,006 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40407 {}] master.ServerManager(517): Registering regionserver=2113c16e5528,40355,1733358505772 2024-12-05T00:28:26,007 DEBUG [RS:0;2113c16e5528:40355 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821 2024-12-05T00:28:26,007 DEBUG [RS:0;2113c16e5528:40355 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43921 2024-12-05T00:28:26,007 DEBUG [RS:0;2113c16e5528:40355 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T00:28:26,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T00:28:26,009 DEBUG [RS:0;2113c16e5528:40355 {}] zookeeper.ZKUtil(111): regionserver:40355-0x1018005b97e0001, quorum=127.0.0.1:60225, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2113c16e5528,40355,1733358505772 2024-12-05T00:28:26,009 WARN [RS:0;2113c16e5528:40355 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T00:28:26,009 INFO [RS:0;2113c16e5528:40355 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T00:28:26,010 DEBUG [RS:0;2113c16e5528:40355 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/WALs/2113c16e5528,40355,1733358505772 2024-12-05T00:28:26,010 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2113c16e5528,40355,1733358505772] 2024-12-05T00:28:26,012 INFO [RS:0;2113c16e5528:40355 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T00:28:26,014 INFO [RS:0;2113c16e5528:40355 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T00:28:26,014 INFO [RS:0;2113c16e5528:40355 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T00:28:26,014 INFO [RS:0;2113c16e5528:40355 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
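The MemStoreFlusher line above (globalMemStoreLimit=880 M with a low watermark of 836 M, i.e. 95% of the limit) and the PressureAwareCompactionThroughputController line (50-100 MB/second bounds) both come from heap-fraction and throughput settings. A hedged sketch of the usual keys, again with names assumed from the standard configuration rather than confirmed from this build:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreAndThroughputSketch {
      public static void main(String[] args) {
        // Property names assumed from hbase-default.xml; verify for the running version.
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the region server heap shared by all memstores (the 880 MB figure above).
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Low watermark as a fraction of that limit (836 MB / 880 MB = 0.95).
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        // Compaction throughput bounds used by the pressure-aware controller.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        System.out.println("memstore fraction = "
            + conf.getFloat("hbase.regionserver.global.memstore.size", -1f));
      }
    }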
2024-12-05T00:28:26,014 INFO [RS:0;2113c16e5528:40355 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T00:28:26,015 INFO [RS:0;2113c16e5528:40355 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T00:28:26,015 INFO [RS:0;2113c16e5528:40355 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-05T00:28:26,015 DEBUG [RS:0;2113c16e5528:40355 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:28:26,015 DEBUG [RS:0;2113c16e5528:40355 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:28:26,015 DEBUG [RS:0;2113c16e5528:40355 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:28:26,015 DEBUG [RS:0;2113c16e5528:40355 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:28:26,015 DEBUG [RS:0;2113c16e5528:40355 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:28:26,015 DEBUG [RS:0;2113c16e5528:40355 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2113c16e5528:0, corePoolSize=2, maxPoolSize=2 2024-12-05T00:28:26,015 DEBUG [RS:0;2113c16e5528:40355 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:28:26,016 DEBUG [RS:0;2113c16e5528:40355 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:28:26,016 DEBUG [RS:0;2113c16e5528:40355 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:28:26,016 DEBUG [RS:0;2113c16e5528:40355 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:28:26,016 DEBUG [RS:0;2113c16e5528:40355 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:28:26,016 DEBUG [RS:0;2113c16e5528:40355 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:28:26,016 DEBUG [RS:0;2113c16e5528:40355 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:28:26,016 DEBUG [RS:0;2113c16e5528:40355 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:28:26,016 INFO [RS:0;2113c16e5528:40355 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
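The ChoreService entries above enable fixed-period background tasks (for example CompactionChecker with period=1000, unit=MILLISECONDS), and the ExecutorService entries start small bounded thread pools per event type. A minimal stand-in for the chore pattern using only the JDK scheduler, not HBase's own ChoreService API; the task body and names are illustrative.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreSketch {
    public static void main(String[] args) throws InterruptedException {
        // One scheduler thread standing in for a chore service.
        ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor();

        // A "CompactionChecker"-style chore: runs every 1000 ms.
        chores.scheduleAtFixedRate(
                () -> System.out.println("chore tick: look for compaction work"),
                0, 1000, TimeUnit.MILLISECONDS);

        TimeUnit.SECONDS.sleep(3); // let a few ticks fire, then stop
        chores.shutdownNow();
    }
}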
2024-12-05T00:28:26,016 INFO [RS:0;2113c16e5528:40355 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T00:28:26,016 INFO [RS:0;2113c16e5528:40355 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:28:26,016 INFO [RS:0;2113c16e5528:40355 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T00:28:26,016 INFO [RS:0;2113c16e5528:40355 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T00:28:26,016 INFO [RS:0;2113c16e5528:40355 {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,40355,1733358505772-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T00:28:26,034 INFO [RS:0;2113c16e5528:40355 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T00:28:26,034 INFO [RS:0;2113c16e5528:40355 {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,40355,1733358505772-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:28:26,034 INFO [RS:0;2113c16e5528:40355 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:28:26,034 INFO [RS:0;2113c16e5528:40355 {}] regionserver.Replication(171): 2113c16e5528,40355,1733358505772 started 2024-12-05T00:28:26,048 INFO [RS:0;2113c16e5528:40355 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:28:26,048 INFO [RS:0;2113c16e5528:40355 {}] regionserver.HRegionServer(1482): Serving as 2113c16e5528,40355,1733358505772, RpcServer on 2113c16e5528/172.17.0.2:40355, sessionid=0x1018005b97e0001 2024-12-05T00:28:26,048 DEBUG [RS:0;2113c16e5528:40355 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T00:28:26,049 DEBUG [RS:0;2113c16e5528:40355 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2113c16e5528,40355,1733358505772 2024-12-05T00:28:26,049 DEBUG [RS:0;2113c16e5528:40355 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2113c16e5528,40355,1733358505772' 2024-12-05T00:28:26,049 DEBUG [RS:0;2113c16e5528:40355 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T00:28:26,049 DEBUG [RS:0;2113c16e5528:40355 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T00:28:26,049 DEBUG [RS:0;2113c16e5528:40355 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T00:28:26,049 DEBUG [RS:0;2113c16e5528:40355 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T00:28:26,049 DEBUG [RS:0;2113c16e5528:40355 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2113c16e5528,40355,1733358505772 2024-12-05T00:28:26,049 DEBUG [RS:0;2113c16e5528:40355 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2113c16e5528,40355,1733358505772' 2024-12-05T00:28:26,049 DEBUG [RS:0;2113c16e5528:40355 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T00:28:26,050 DEBUG 
[RS:0;2113c16e5528:40355 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T00:28:26,050 DEBUG [RS:0;2113c16e5528:40355 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T00:28:26,050 INFO [RS:0;2113c16e5528:40355 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T00:28:26,050 INFO [RS:0;2113c16e5528:40355 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T00:28:26,127 WARN [2113c16e5528:40407 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-05T00:28:26,152 INFO [RS:0;2113c16e5528:40355 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2113c16e5528%2C40355%2C1733358505772, suffix=, logDir=hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/WALs/2113c16e5528,40355,1733358505772, archiveDir=hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/oldWALs, maxLogs=32 2024-12-05T00:28:26,152 INFO [RS:0;2113c16e5528:40355 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C40355%2C1733358505772.1733358506152 2024-12-05T00:28:26,158 INFO [RS:0;2113c16e5528:40355 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/WALs/2113c16e5528,40355,1733358505772/2113c16e5528%2C40355%2C1733358505772.1733358506152 2024-12-05T00:28:26,158 DEBUG [RS:0;2113c16e5528:40355 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34223:34223),(127.0.0.1/127.0.0.1:33171:33171)] 2024-12-05T00:28:26,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,37749,1733358324931/2113c16e5528%2C37749%2C1733358324931.meta.1733358325913.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-05T00:28:26,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41383/user/jenkins/test-data/e6246c27-082c-0954-c783-c60e0baffde8/WALs/2113c16e5528,45031,1733358326108/2113c16e5528%2C45031%2C1733358326108.1733358326307 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-05T00:28:26,377 DEBUG [2113c16e5528:40407 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-05T00:28:26,378 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=2113c16e5528,40355,1733358505772 2024-12-05T00:28:26,379 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2113c16e5528,40355,1733358505772, state=OPENING 2024-12-05T00:28:26,380 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-05T00:28:26,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:28:26,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x1018005b97e0001, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:28:26,382 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:28:26,382 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:28:26,382 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T00:28:26,382 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=2113c16e5528,40355,1733358505772}] 2024-12-05T00:28:26,535 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T00:28:26,537 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44121, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T00:28:26,540 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-05T00:28:26,540 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T00:28:26,541 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2113c16e5528%2C40355%2C1733358505772.meta, suffix=.meta, logDir=hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/WALs/2113c16e5528,40355,1733358505772, archiveDir=hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/oldWALs, maxLogs=32 2024-12-05T00:28:26,542 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 2113c16e5528%2C40355%2C1733358505772.meta.1733358506542.meta 2024-12-05T00:28:26,546 INFO 
[RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/WALs/2113c16e5528,40355,1733358505772/2113c16e5528%2C40355%2C1733358505772.meta.1733358506542.meta 2024-12-05T00:28:26,552 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34223:34223),(127.0.0.1/127.0.0.1:33171:33171)] 2024-12-05T00:28:26,553 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-05T00:28:26,553 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-05T00:28:26,553 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-05T00:28:26,554 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-05T00:28:26,554 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-05T00:28:26,554 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:28:26,554 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-05T00:28:26,554 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-05T00:28:26,555 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T00:28:26,556 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T00:28:26,556 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:28:26,556 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:28:26,556 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T00:28:26,557 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T00:28:26,557 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:28:26,557 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:28:26,557 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T00:28:26,557 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T00:28:26,557 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:28:26,558 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:28:26,558 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T00:28:26,558 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T00:28:26,558 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:28:26,559 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:28:26,559 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T00:28:26,559 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/data/hbase/meta/1588230740 2024-12-05T00:28:26,560 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/data/hbase/meta/1588230740 2024-12-05T00:28:26,561 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T00:28:26,561 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T00:28:26,562 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
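The Close-WAL-Writer warnings earlier in this log show a reflective call to an isFileClosed method failing with an InvocationTargetException whose cause is IOException: Filesystem closed. Reflection always wraps a checked exception thrown by the target method this way. A self-contained sketch of that wrap-and-unwrap behaviour, using a stand-in class rather than the Hadoop client; FakeFs and its method are hypothetical.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveProbeSketch {
    // Stand-in for a filesystem client that has already been closed.
    public static class FakeFs {
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        Method probe = FakeFs.class.getMethod("isFileClosed", String.class);
        try {
            probe.invoke(new FakeFs(), "/some/wal/file");
        } catch (InvocationTargetException e) {
            // The checked IOException arrives as the wrapper's cause,
            // matching the "Caused by: java.io.IOException" in the log.
            System.out.println("cause: " + e.getCause());
        }
    }
}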
2024-12-05T00:28:26,563 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T00:28:26,563 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=703239, jitterRate=-0.10578611493110657}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-05T00:28:26,563 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-05T00:28:26,564 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733358506554Writing region info on filesystem at 1733358506554Initializing all the Stores at 1733358506555 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358506555Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358506555Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733358506555Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733358506555Cleaning up temporary data from old regions at 1733358506561 (+6 ms)Running coprocessor post-open hooks at 1733358506563 (+2 ms)Region opened successfully at 1733358506564 (+1 ms) 2024-12-05T00:28:26,565 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733358506534 2024-12-05T00:28:26,567 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-05T00:28:26,567 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-05T00:28:26,567 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=2113c16e5528,40355,1733358505772 2024-12-05T00:28:26,568 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2113c16e5528,40355,1733358505772, state=OPEN 2024-12-05T00:28:26,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T00:28:26,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x1018005b97e0001, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T00:28:26,577 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=2113c16e5528,40355,1733358505772 2024-12-05T00:28:26,577 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:28:26,577 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:28:26,579 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-05T00:28:26,579 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=2113c16e5528,40355,1733358505772 in 195 msec 2024-12-05T00:28:26,581 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-05T00:28:26,581 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 604 msec 2024-12-05T00:28:26,581 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:28:26,581 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-05T00:28:26,582 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T00:28:26,583 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,40355,1733358505772, seqNum=-1] 2024-12-05T00:28:26,583 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T00:28:26,584 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50927, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T00:28:26,588 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 657 msec 2024-12-05T00:28:26,588 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733358506588, completionTime=-1 2024-12-05T00:28:26,588 INFO 
[master/2113c16e5528:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-05T00:28:26,588 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-05T00:28:26,590 INFO [master/2113c16e5528:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-05T00:28:26,590 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733358566590 2024-12-05T00:28:26,590 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733358626590 2024-12-05T00:28:26,590 INFO [master/2113c16e5528:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-05T00:28:26,590 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,40407,1733358505724-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:28:26,590 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,40407,1733358505724-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:28:26,590 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,40407,1733358505724-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:28:26,590 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-2113c16e5528:40407, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:28:26,590 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-05T00:28:26,590 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-05T00:28:26,592 DEBUG [master/2113c16e5528:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-05T00:28:26,594 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.792sec 2024-12-05T00:28:26,594 INFO [master/2113c16e5528:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-05T00:28:26,594 INFO [master/2113c16e5528:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-05T00:28:26,594 INFO [master/2113c16e5528:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-05T00:28:26,594 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
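The ZKWatcher entries in this log (NodeDataChanged and NodeChildrenChanged on znodes such as /hbase/meta-region-server) reflect ordinary one-shot ZooKeeper watches being re-armed after each notification. A small sketch of that re-arm pattern against the stock ZooKeeper client API; the quorum string and znode path are placeholders, and the znode is assumed to already exist.

import java.nio.charset.StandardCharsets;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class MetaLocationWatcherSketch implements Watcher {
    private final ZooKeeper zk;
    private final String znode;

    public MetaLocationWatcherSketch(String quorum, String znode) throws Exception {
        this.znode = znode;
        this.zk = new ZooKeeper(quorum, 30_000, this); // 'this' is the default watcher
        rearm();
    }

    // Reading with watch=true registers a one-shot watch on the znode.
    private void rearm() throws Exception {
        byte[] data = zk.getData(znode, true, null);
        System.out.println("location znode now: " + new String(data, StandardCharsets.UTF_8));
    }

    @Override
    public void process(WatchedEvent event) {
        // A fired watch is gone; read again to refresh and re-register it.
        if (event.getType() == Event.EventType.NodeDataChanged
                && znode.equals(event.getPath())) {
            try {
                rearm();
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }

    public static void main(String[] args) throws Exception {
        new MetaLocationWatcherSketch("127.0.0.1:2181", "/hbase/meta-region-server");
        Thread.sleep(60_000); // keep the session open long enough to see events
    }
}

The MetaRegionLocationCache updates logged above follow the same shape: the event only says that something changed, and the new value is obtained with a fresh read that also re-sets the watch.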
2024-12-05T00:28:26,594 INFO [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-05T00:28:26,594 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,40407,1733358505724-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T00:28:26,594 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,40407,1733358505724-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-05T00:28:26,596 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-05T00:28:26,596 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-05T00:28:26,596 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,40407,1733358505724-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:28:26,685 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ba1b0df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:28:26,685 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,40407,-1 for getting cluster id 2024-12-05T00:28:26,686 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T00:28:26,687 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '09e4cc77-9f12-499e-913b-f792248fa803' 2024-12-05T00:28:26,687 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T00:28:26,687 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "09e4cc77-9f12-499e-913b-f792248fa803" 2024-12-05T00:28:26,688 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63bb83a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:28:26,688 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,40407,-1] 2024-12-05T00:28:26,688 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T00:28:26,688 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:28:26,689 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51380, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T00:28:26,690 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72757a2f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:28:26,690 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T00:28:26,691 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,40355,1733358505772, seqNum=-1] 2024-12-05T00:28:26,691 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T00:28:26,692 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57952, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T00:28:26,693 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=2113c16e5528,40407,1733358505724 2024-12-05T00:28:26,694 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:28:26,696 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-05T00:28:26,696 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T00:28:26,698 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/WALs/test.com,8080,1, archiveDir=hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/oldWALs, maxLogs=32 2024-12-05T00:28:26,698 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733358506698 2024-12-05T00:28:26,702 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/WALs/test.com,8080,1/test.com%2C8080%2C1.1733358506698 2024-12-05T00:28:26,703 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33171:33171),(127.0.0.1/127.0.0.1:34223:34223)] 2024-12-05T00:28:26,704 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733358506704 2024-12-05T00:28:26,708 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:26,708 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:26,708 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:26,708 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:26,708 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:26,708 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/WALs/test.com,8080,1/test.com%2C8080%2C1.1733358506698 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/WALs/test.com,8080,1/test.com%2C8080%2C1.1733358506704 2024-12-05T00:28:26,709 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34223:34223),(127.0.0.1/127.0.0.1:33171:33171)] 2024-12-05T00:28:26,709 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/WALs/test.com,8080,1/test.com%2C8080%2C1.1733358506698 is not closed yet, will try archiving it next time 2024-12-05T00:28:26,709 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:26,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44693 is added to blk_1073741835_1011 (size=93) 2024-12-05T00:28:26,709 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:26,710 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:26,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741835_1011 (size=93) 2024-12-05T00:28:26,710 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:26,710 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:26,710 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/WALs/test.com,8080,1/test.com%2C8080%2C1.1733358506698 to hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/oldWALs/test.com%2C8080%2C1.1733358506698 2024-12-05T00:28:26,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741836_1012 (size=93) 2024-12-05T00:28:26,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44693 is added to blk_1073741836_1012 (size=93) 2024-12-05T00:28:26,713 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/oldWALs 2024-12-05T00:28:26,713 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1733358506704) 2024-12-05T00:28:26,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-05T00:28:26,713 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
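The roll shown above follows a simple sequence: a new WAL file is created, writers are pointed at it, and the previous file (here with entries=0) is archived by moving it into the oldWALs directory. A filesystem-level sketch of that create-then-move sequence using java.nio.file on the local filesystem, not the HDFS client; paths and names are illustrative.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class WalRollSketch {
    public static void main(String[] args) throws IOException {
        Path walDir = Files.createTempDirectory("WALs");
        Path oldWalDir = Files.createTempDirectory("oldWALs");

        // The "current" log file, named with a timestamp like the WALs above.
        Path current = Files.write(walDir.resolve("wal." + System.currentTimeMillis()),
                new byte[0]);

        // Roll: create the successor first, then archive the old file
        // with a rename so it never exists in two places at once.
        Path next = Files.write(walDir.resolve("wal." + (System.currentTimeMillis() + 1)),
                new byte[0]);
        Files.move(current, oldWalDir.resolve(current.getFileName()),
                StandardCopyOption.ATOMIC_MOVE);

        System.out.println("active: " + next);
        System.out.println("archived: " + oldWalDir.resolve(current.getFileName()));
    }
}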
2024-12-05T00:28:26,713 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:28:26,714 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:28:26,714 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:28:26,714 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
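The call stack above originates in AbstractTestLogRolling.tearDown, which JUnit 4 runs through RunAfters after each test method; that is the standard @After lifecycle. A skeletal JUnit 4 shape for that pattern, with an AutoCloseable placeholder standing in for the mini-cluster handle.

import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class TeardownShapeSketch {
    private AutoCloseable cluster; // placeholder for a mini-cluster handle

    @Before
    public void setUp() {
        cluster = () -> System.out.println("cluster shut down");
    }

    @Test
    public void exercisesTheCluster() {
        // test body elided; the point is the lifecycle around it
    }

    @After
    public void tearDown() throws Exception {
        // Invoked by RunAfters after every test, which is where the
        // shutdownMiniCluster call appears in the stack trace above.
        cluster.close();
    }
}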
2024-12-05T00:28:26,714 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-05T00:28:26,714 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1085113023, stopped=false 2024-12-05T00:28:26,714 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=2113c16e5528,40407,1733358505724 2024-12-05T00:28:26,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T00:28:26,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x1018005b97e0001, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T00:28:26,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:28:26,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x1018005b97e0001, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:28:26,715 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T00:28:26,715 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-05T00:28:26,715 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:28:26,715 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:28:26,716 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2113c16e5528,40355,1733358505772' ***** 2024-12-05T00:28:26,716 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40355-0x1018005b97e0001, quorum=127.0.0.1:60225, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:28:26,716 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T00:28:26,716 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:28:26,716 INFO [RS:0;2113c16e5528:40355 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T00:28:26,716 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T00:28:26,716 INFO [RS:0;2113c16e5528:40355 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T00:28:26,716 INFO [RS:0;2113c16e5528:40355 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T00:28:26,716 INFO [RS:0;2113c16e5528:40355 {}] regionserver.HRegionServer(959): stopping server 2113c16e5528,40355,1733358505772 2024-12-05T00:28:26,716 INFO [RS:0;2113c16e5528:40355 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T00:28:26,716 INFO [RS:0;2113c16e5528:40355 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;2113c16e5528:40355. 
2024-12-05T00:28:26,716 DEBUG [RS:0;2113c16e5528:40355 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:28:26,716 DEBUG [RS:0;2113c16e5528:40355 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:28:26,716 INFO [RS:0;2113c16e5528:40355 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T00:28:26,716 INFO [RS:0;2113c16e5528:40355 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T00:28:26,717 INFO [RS:0;2113c16e5528:40355 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-05T00:28:26,717 INFO [RS:0;2113c16e5528:40355 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-05T00:28:26,717 INFO [RS:0;2113c16e5528:40355 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-05T00:28:26,717 DEBUG [RS:0;2113c16e5528:40355 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-05T00:28:26,717 DEBUG [RS:0;2113c16e5528:40355 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-05T00:28:26,717 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T00:28:26,717 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T00:28:26,717 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T00:28:26,717 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T00:28:26,717 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T00:28:26,717 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-05T00:28:26,733 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/data/hbase/meta/1588230740/.tmp/ns/ef364f90140448769d30447f1527adae is 43, key is default/ns:d/1733358506584/Put/seqid=0 2024-12-05T00:28:26,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741837_1013 (size=5153) 2024-12-05T00:28:26,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44693 is added to blk_1073741837_1013 (size=5153) 2024-12-05T00:28:26,738 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/data/hbase/meta/1588230740/.tmp/ns/ef364f90140448769d30447f1527adae 2024-12-05T00:28:26,743 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/data/hbase/meta/1588230740/.tmp/ns/ef364f90140448769d30447f1527adae as hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/data/hbase/meta/1588230740/ns/ef364f90140448769d30447f1527adae 2024-12-05T00:28:26,748 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/data/hbase/meta/1588230740/ns/ef364f90140448769d30447f1527adae, entries=2, sequenceid=6, filesize=5.0 K 2024-12-05T00:28:26,749 INFO 
[RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 32ms, sequenceid=6, compaction requested=false 2024-12-05T00:28:26,749 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-05T00:28:26,752 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T00:28:26,753 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T00:28:26,753 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T00:28:26,753 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733358506717Running coprocessor pre-close hooks at 1733358506717Disabling compacts and flushes for region at 1733358506717Disabling writes for close at 1733358506717Obtaining lock to block concurrent updates at 1733358506717Preparing flush snapshotting stores in 1588230740 at 1733358506717Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733358506717Flushing stores of hbase:meta,,1.1588230740 at 1733358506718 (+1 ms)Flushing 1588230740/ns: creating writer at 1733358506718Flushing 1588230740/ns: appending metadata at 1733358506732 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733358506732Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@566a16d5: reopening flushed file at 1733358506742 (+10 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 32ms, sequenceid=6, compaction requested=false at 1733358506749 (+7 ms)Writing region close event to WAL at 1733358506750 (+1 ms)Running coprocessor post-close hooks at 1733358506753 (+3 ms)Closed at 1733358506753 2024-12-05T00:28:26,753 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-05T00:28:26,917 INFO [RS:0;2113c16e5528:40355 {}] regionserver.HRegionServer(976): stopping server 2113c16e5528,40355,1733358505772; all regions closed. 
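Editor's note: the flush above first writes the new file under the region's `.tmp` directory and only then "commits" it by renaming it into the column-family directory, so readers never observe a half-written file. The sketch below illustrates that write-then-rename idea with plain `java.nio.file` on a local filesystem; it is an analogy, not the HBase `HRegionFileSystem` code (which performs the equivalent rename on HDFS), and the method and file names are illustrative.

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class TmpThenCommit {
    /** Illustrative helper: write bytes to dir/.tmp/name, then atomically move the file to dir/name. */
    static Path flushAndCommit(Path dir, String name, byte[] data) throws IOException {
        Path tmpDir = Files.createDirectories(dir.resolve(".tmp"));
        Path tmpFile = tmpDir.resolve(name);
        Files.write(tmpFile, data);                        // "flush": file is visible only under .tmp
        Path committed = dir.resolve(name);
        // "commit": a single rename makes the complete file visible to readers
        return Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path familyDir = Files.createTempDirectory("ns-family");
        Path file = flushAndCommit(familyDir, "example-hfile", "default/ns:d".getBytes());
        System.out.println("Committed flush to " + file);
    }
}
```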
2024-12-05T00:28:26,918 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:26,918 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:26,918 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:26,918 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:26,918 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:26,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741834_1010 (size=1152) 2024-12-05T00:28:26,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44693 is added to blk_1073741834_1010 (size=1152) 2024-12-05T00:28:26,922 DEBUG [RS:0;2113c16e5528:40355 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/oldWALs 2024-12-05T00:28:26,922 INFO [RS:0;2113c16e5528:40355 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2113c16e5528%2C40355%2C1733358505772.meta:.meta(num 1733358506542) 2024-12-05T00:28:26,922 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:26,922 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:26,922 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:26,922 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:26,922 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:26,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741833_1009 (size=93) 2024-12-05T00:28:26,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44693 is added to blk_1073741833_1009 (size=93) 2024-12-05T00:28:26,926 DEBUG [RS:0;2113c16e5528:40355 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/oldWALs 2024-12-05T00:28:26,926 INFO [RS:0;2113c16e5528:40355 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2113c16e5528%2C40355%2C1733358505772:(num 1733358506152) 2024-12-05T00:28:26,926 DEBUG [RS:0;2113c16e5528:40355 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:28:26,926 INFO [RS:0;2113c16e5528:40355 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T00:28:26,926 INFO [RS:0;2113c16e5528:40355 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T00:28:26,926 INFO [RS:0;2113c16e5528:40355 {}] hbase.ChoreService(370): Chore service for: regionserver/2113c16e5528:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-05T00:28:26,926 INFO [RS:0;2113c16e5528:40355 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T00:28:26,926 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-05T00:28:26,926 INFO [RS:0;2113c16e5528:40355 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40355 2024-12-05T00:28:26,928 INFO [RS:0;2113c16e5528:40355 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T00:28:26,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T00:28:26,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x1018005b97e0001, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2113c16e5528,40355,1733358505772 2024-12-05T00:28:26,930 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2113c16e5528,40355,1733358505772] 2024-12-05T00:28:26,936 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2113c16e5528,40355,1733358505772 already deleted, retry=false 2024-12-05T00:28:26,936 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2113c16e5528,40355,1733358505772 expired; onlineServers=0 2024-12-05T00:28:26,936 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '2113c16e5528,40407,1733358505724' ***** 2024-12-05T00:28:26,936 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-05T00:28:26,936 INFO [M:0;2113c16e5528:40407 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T00:28:26,936 INFO [M:0;2113c16e5528:40407 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T00:28:26,936 DEBUG [M:0;2113c16e5528:40407 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-05T00:28:26,936 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
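Editor's note: the master learns that the region server is gone because the server's ephemeral znode under /hbase/rs disappears and the watcher receives a NodeDeleted event, which RegionServerTracker then handles as an expiration. Below is a bare-bones sketch of watching a znode for deletion with the stock ZooKeeper client; the quorum string, znode path, and timeout are placeholders, and this is not the HBase ZKWatcher implementation.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralNodeWatch {
    public static void main(String[] args) throws Exception {
        String quorum = "127.0.0.1:2181";            // placeholder quorum
        String rsZnode = "/hbase/rs/example-server"; // placeholder znode
        CountDownLatch deleted = new CountDownLatch(1);

        ZooKeeper zk = new ZooKeeper(quorum, 30_000, event -> { /* session-level events ignored here */ });

        // Set a one-shot watch; the callback fires when the (ephemeral) node goes away.
        zk.exists(rsZnode, (WatchedEvent event) -> {
            if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
                System.out.println("znode deleted, treat as server expiration: " + event.getPath());
                deleted.countDown();
            }
        });

        deleted.await();   // block until the node is deleted (e.g. the owner's session expired)
        zk.close();
    }
}
```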
2024-12-05T00:28:26,936 DEBUG [M:0;2113c16e5528:40407 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-05T00:28:26,936 DEBUG [master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.large.0-1733358505941 {}] cleaner.HFileCleaner(306): Exit Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.large.0-1733358505941,5,FailOnTimeoutGroup] 2024-12-05T00:28:26,936 DEBUG [master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.small.0-1733358505941 {}] cleaner.HFileCleaner(306): Exit Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.small.0-1733358505941,5,FailOnTimeoutGroup] 2024-12-05T00:28:26,936 INFO [M:0;2113c16e5528:40407 {}] hbase.ChoreService(370): Chore service for: master/2113c16e5528:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-05T00:28:26,936 INFO [M:0;2113c16e5528:40407 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T00:28:26,936 DEBUG [M:0;2113c16e5528:40407 {}] master.HMaster(1795): Stopping service threads 2024-12-05T00:28:26,937 INFO [M:0;2113c16e5528:40407 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-05T00:28:26,937 INFO [M:0;2113c16e5528:40407 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T00:28:26,937 INFO [M:0;2113c16e5528:40407 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-05T00:28:26,937 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-05T00:28:26,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-05T00:28:26,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:28:26,937 DEBUG [M:0;2113c16e5528:40407 {}] zookeeper.ZKUtil(347): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-05T00:28:26,938 WARN [M:0;2113c16e5528:40407 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-05T00:28:26,938 INFO [M:0;2113c16e5528:40407 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/.lastflushedseqids 2024-12-05T00:28:26,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741838_1014 (size=99) 2024-12-05T00:28:26,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44693 is added to blk_1073741838_1014 (size=99) 2024-12-05T00:28:26,943 INFO [M:0;2113c16e5528:40407 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-05T00:28:26,943 INFO [M:0;2113c16e5528:40407 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-05T00:28:26,944 DEBUG [M:0;2113c16e5528:40407 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T00:28:26,944 INFO [M:0;2113c16e5528:40407 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:28:26,944 DEBUG [M:0;2113c16e5528:40407 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:28:26,944 DEBUG [M:0;2113c16e5528:40407 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T00:28:26,944 DEBUG [M:0;2113c16e5528:40407 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:28:26,944 INFO [M:0;2113c16e5528:40407 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-05T00:28:26,965 DEBUG [M:0;2113c16e5528:40407 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/799b55b62be3482087ef70d7d0b4eb4c is 82, key is hbase:meta,,1/info:regioninfo/1733358506567/Put/seqid=0 2024-12-05T00:28:26,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44693 is added to blk_1073741839_1015 (size=5672) 2024-12-05T00:28:26,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741839_1015 (size=5672) 2024-12-05T00:28:26,970 INFO [M:0;2113c16e5528:40407 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/799b55b62be3482087ef70d7d0b4eb4c 2024-12-05T00:28:26,989 DEBUG [M:0;2113c16e5528:40407 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/447c87a3665f48c386238ad0034c7a4c is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733358506588/Put/seqid=0 2024-12-05T00:28:26,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44693 is added to blk_1073741840_1016 (size=5275) 2024-12-05T00:28:26,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741840_1016 (size=5275) 2024-12-05T00:28:26,993 INFO [M:0;2113c16e5528:40407 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/447c87a3665f48c386238ad0034c7a4c 2024-12-05T00:28:27,012 DEBUG [M:0;2113c16e5528:40407 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5971c08731d94829ab8627d0dfa2fb20 is 69, key is 2113c16e5528,40355,1733358505772/rs:state/1733358506006/Put/seqid=0 2024-12-05T00:28:27,016 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741841_1017 (size=5156) 2024-12-05T00:28:27,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44693 is added to blk_1073741841_1017 (size=5156) 2024-12-05T00:28:27,017 INFO [M:0;2113c16e5528:40407 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5971c08731d94829ab8627d0dfa2fb20 2024-12-05T00:28:27,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x1018005b97e0001, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:28:27,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x1018005b97e0001, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:28:27,030 INFO [RS:0;2113c16e5528:40355 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T00:28:27,030 INFO [RS:0;2113c16e5528:40355 {}] regionserver.HRegionServer(1031): Exiting; stopping=2113c16e5528,40355,1733358505772; zookeeper connection closed. 2024-12-05T00:28:27,030 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1aa8c993 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1aa8c993 2024-12-05T00:28:27,030 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-05T00:28:27,043 DEBUG [M:0;2113c16e5528:40407 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/995a8f37c86448a7b9d82e7626517af1 is 52, key is load_balancer_on/state:d/1733358506695/Put/seqid=0 2024-12-05T00:28:27,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44693 is added to blk_1073741842_1018 (size=5056) 2024-12-05T00:28:27,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741842_1018 (size=5056) 2024-12-05T00:28:27,048 INFO [M:0;2113c16e5528:40407 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/995a8f37c86448a7b9d82e7626517af1 2024-12-05T00:28:27,052 DEBUG [M:0;2113c16e5528:40407 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/799b55b62be3482087ef70d7d0b4eb4c as hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/799b55b62be3482087ef70d7d0b4eb4c 2024-12-05T00:28:27,056 INFO [M:0;2113c16e5528:40407 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/799b55b62be3482087ef70d7d0b4eb4c, entries=8, sequenceid=29, filesize=5.5 K 2024-12-05T00:28:27,057 DEBUG [M:0;2113c16e5528:40407 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/447c87a3665f48c386238ad0034c7a4c as hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/447c87a3665f48c386238ad0034c7a4c 2024-12-05T00:28:27,061 INFO [M:0;2113c16e5528:40407 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/447c87a3665f48c386238ad0034c7a4c, entries=3, sequenceid=29, filesize=5.2 K 2024-12-05T00:28:27,062 DEBUG [M:0;2113c16e5528:40407 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5971c08731d94829ab8627d0dfa2fb20 as hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5971c08731d94829ab8627d0dfa2fb20 2024-12-05T00:28:27,066 INFO [M:0;2113c16e5528:40407 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5971c08731d94829ab8627d0dfa2fb20, entries=1, sequenceid=29, filesize=5.0 K 2024-12-05T00:28:27,067 DEBUG [M:0;2113c16e5528:40407 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/995a8f37c86448a7b9d82e7626517af1 as hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/995a8f37c86448a7b9d82e7626517af1 2024-12-05T00:28:27,071 INFO [M:0;2113c16e5528:40407 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43921/user/jenkins/test-data/4b039514-2647-14e9-144e-39d9b7cbc821/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/995a8f37c86448a7b9d82e7626517af1, entries=1, sequenceid=29, filesize=4.9 K 2024-12-05T00:28:27,072 INFO [M:0;2113c16e5528:40407 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=29, compaction requested=false 2024-12-05T00:28:27,073 INFO [M:0;2113c16e5528:40407 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-05T00:28:27,073 DEBUG [M:0;2113c16e5528:40407 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733358506944Disabling compacts and flushes for region at 1733358506944Disabling writes for close at 1733358506944Obtaining lock to block concurrent updates at 1733358506944Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733358506944Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733358506944Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733358506945 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733358506945Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733358506964 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733358506964Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733358506973 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733358506988 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733358506988Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733358506997 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733358507011 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733358507011Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733358507021 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733358507042 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733358507042Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@27e72a7d: reopening flushed file at 1733358507051 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4610cca0: reopening flushed file at 1733358507057 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@62fe2c37: reopening flushed file at 1733358507061 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b3be0a5: reopening flushed file at 1733358507066 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=29, compaction requested=false at 1733358507072 (+6 ms)Writing region close event to WAL at 1733358507073 (+1 ms)Closed at 1733358507073 2024-12-05T00:28:27,074 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:27,074 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:27,074 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:27,074 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:27,074 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:28:27,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44693 is added to blk_1073741830_1006 (size=10311) 2024-12-05T00:28:27,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741830_1006 (size=10311) 2024-12-05T00:28:27,077 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
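Editor's note: the "Region close journal" lines above are a running list of named steps with the timestamp at which each was reached, printed with millisecond deltas ("+1 ms", "+19 ms") so a slow close can be pinned to a specific phase. A tiny, generic sketch of that journaling pattern follows; the `CloseJournal` class and the step names are invented for illustration and are not HBase's monitored-task code.

```java
import java.util.ArrayList;
import java.util.List;

// Minimal "status journal": record named steps with timestamps, print deltas at the end.
public class CloseJournal {
    private record Step(String what, long atMillis) {}
    private final List<Step> steps = new ArrayList<>();

    void mark(String what) {
        steps.add(new Step(what, System.currentTimeMillis()));
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        long prev = -1;
        for (Step s : steps) {
            sb.append(s.what()).append(" at ").append(s.atMillis());
            if (prev >= 0) {
                sb.append(" (+").append(s.atMillis() - prev).append(" ms)");
            }
            sb.append(System.lineSeparator());
            prev = s.atMillis();
        }
        return sb.toString();
    }

    public static void main(String[] args) throws InterruptedException {
        CloseJournal journal = new CloseJournal();
        journal.mark("Waiting for close lock");
        journal.mark("Disabling writes for close");
        Thread.sleep(5);                      // stand-in for the actual flush work
        journal.mark("Finished flush");
        journal.mark("Closed");
        System.out.print(journal);
    }
}
```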
2024-12-05T00:28:27,077 INFO [M:0;2113c16e5528:40407 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-05T00:28:27,077 INFO [M:0;2113c16e5528:40407 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40407 2024-12-05T00:28:27,077 INFO [M:0;2113c16e5528:40407 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T00:28:27,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:28:27,179 INFO [M:0;2113c16e5528:40407 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T00:28:27,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1018005b97e0000, quorum=127.0.0.1:60225, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:28:27,181 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2d0ad989{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:28:27,181 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7e58a9be{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:28:27,181 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:28:27,181 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2191d18b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:28:27,182 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4d7e0513{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/hadoop.log.dir/,STOPPED} 2024-12-05T00:28:27,183 WARN [BP-805312939-172.17.0.2-1733358505084 heartbeating to localhost/127.0.0.1:43921 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T00:28:27,183 WARN [BP-805312939-172.17.0.2-1733358505084 heartbeating to localhost/127.0.0.1:43921 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-805312939-172.17.0.2-1733358505084 (Datanode Uuid a259d140-b6d8-4442-af8d-c84d828241af) service to localhost/127.0.0.1:43921 2024-12-05T00:28:27,183 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T00:28:27,183 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T00:28:27,183 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/cluster_ede70a14-c4b0-f197-fc70-ae7362d8b6c6/data/data3/current/BP-805312939-172.17.0.2-1733358505084 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:28:27,184 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/cluster_ede70a14-c4b0-f197-fc70-ae7362d8b6c6/data/data4/current/BP-805312939-172.17.0.2-1733358505084 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:28:27,184 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T00:28:27,186 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@a6bcfcb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:28:27,186 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@60c41d21{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:28:27,186 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:28:27,186 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@665d17bc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:28:27,186 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a21ffab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/hadoop.log.dir/,STOPPED} 2024-12-05T00:28:27,187 WARN [BP-805312939-172.17.0.2-1733358505084 heartbeating to localhost/127.0.0.1:43921 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T00:28:27,187 WARN [BP-805312939-172.17.0.2-1733358505084 heartbeating to localhost/127.0.0.1:43921 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-805312939-172.17.0.2-1733358505084 (Datanode Uuid 41c51d21-00bb-489d-b261-f37af0c529db) service to localhost/127.0.0.1:43921 2024-12-05T00:28:27,187 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T00:28:27,187 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T00:28:27,187 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/cluster_ede70a14-c4b0-f197-fc70-ae7362d8b6c6/data/data1/current/BP-805312939-172.17.0.2-1733358505084 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:28:27,188 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/cluster_ede70a14-c4b0-f197-fc70-ae7362d8b6c6/data/data2/current/BP-805312939-172.17.0.2-1733358505084 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:28:27,188 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T00:28:27,193 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7c411d10{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-05T00:28:27,193 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a06f288{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:28:27,194 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:28:27,194 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5a248f23{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:28:27,194 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3317cc02{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/532e03e4-cec1-085d-e50d-436336e3c57c/hadoop.log.dir/,STOPPED} 2024-12-05T00:28:27,200 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-05T00:28:27,212 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-05T00:28:27,221 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=269 (was 230) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43921 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43921 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: globalEventExecutor-1-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:43921 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43921 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43921 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43921 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43921 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43921 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=536 (was 509) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=19 (was 19), ProcessCount=11 (was 11), AvailableMemoryMB=8589 (was 8601)
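Editor's note: the ResourceChecker report above compares the live-thread count before and after the test (269 vs 230) and dumps the stack of each "potentially hanging thread" so leaked event loops and IPC clients can be identified. The sketch below shows the underlying JDK mechanism, Thread.getAllStackTraces(), which is enough to produce a similar dump; the daemon/alive filter is purely illustrative and is not the HBase ResourceChecker logic.

```java
import java.util.Map;

public class ThreadLeakDump {
    public static void main(String[] args) {
        Map<Thread, StackTraceElement[]> all = Thread.getAllStackTraces();
        System.out.println("Live threads: " + all.size());

        for (Map.Entry<Thread, StackTraceElement[]> entry : all.entrySet()) {
            Thread t = entry.getKey();
            // Illustrative filter: report non-daemon threads that are still alive,
            // similar in spirit to the "Potentially hanging thread" entries above.
            if (!t.isDaemon() && t.isAlive()) {
                System.out.println("Potentially hanging thread: " + t.getName());
                for (StackTraceElement frame : entry.getValue()) {
                    System.out.println("    " + frame);
                }
            }
        }
    }
}
```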