2024-11-24 18:51:11,301 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-24 18:51:11,313 main DEBUG Took 0.010189 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-24 18:51:11,313 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-24 18:51:11,313 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-24 18:51:11,314 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-24 18:51:11,315 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 18:51:11,322 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-24 18:51:11,336 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 18:51:11,337 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 18:51:11,338 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 18:51:11,339 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 18:51:11,340 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 18:51:11,340 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 18:51:11,341 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 18:51:11,342 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 18:51:11,342 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 18:51:11,342 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 18:51:11,343 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 18:51:11,344 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 18:51:11,344 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 18:51:11,345 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-24 18:51:11,345 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 18:51:11,346 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 18:51:11,346 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 18:51:11,347 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 18:51:11,347 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 18:51:11,348 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 18:51:11,348 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 18:51:11,349 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 18:51:11,349 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 18:51:11,350 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 18:51:11,350 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 18:51:11,351 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-24 18:51:11,353 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 18:51:11,354 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-24 18:51:11,357 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-24 18:51:11,357 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-24 18:51:11,359 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-24 18:51:11,360 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-24 18:51:11,368 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-24 18:51:11,371 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-24 18:51:11,372 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-24 18:51:11,373 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-24 18:51:11,373 main DEBUG createAppenders(={Console}) 2024-11-24 18:51:11,374 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-24 18:51:11,374 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-24 18:51:11,375 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-24 18:51:11,375 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-24 18:51:11,376 main DEBUG OutputStream closed 2024-11-24 18:51:11,376 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-24 18:51:11,376 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-24 18:51:11,377 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-24 18:51:11,465 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-24 18:51:11,468 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-24 18:51:11,469 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-24 18:51:11,470 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-24 18:51:11,471 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-24 18:51:11,471 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-24 18:51:11,472 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-24 18:51:11,472 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-24 18:51:11,472 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-24 18:51:11,473 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-24 18:51:11,473 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-24 18:51:11,474 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-24 18:51:11,474 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-24 18:51:11,475 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-24 18:51:11,475 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-24 18:51:11,475 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-24 18:51:11,475 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-24 18:51:11,476 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-24 18:51:11,479 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-24 18:51:11,479 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-24 18:51:11,479 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-24 18:51:11,480 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-24T18:51:11,704 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568 2024-11-24 18:51:11,707 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-24 18:51:11,708 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-24T18:51:11,716 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-24T18:51:11,786 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=310, ProcessCount=11, AvailableMemoryMB=9487 2024-11-24T18:51:11,789 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T18:51:11,811 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/cluster_8fd6d39d-f1e7-d111-0b8b-0e415c4d9e0a, deleteOnExit=true 2024-11-24T18:51:11,811 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T18:51:11,813 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/test.cache.data in system properties and HBase conf 2024-11-24T18:51:11,815 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T18:51:11,816 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/hadoop.log.dir in system properties and HBase conf 2024-11-24T18:51:11,817 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T18:51:11,818 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T18:51:11,818 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T18:51:11,930 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-24T18:51:12,050 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-24T18:51:12,055 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T18:51:12,056 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T18:51:12,057 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T18:51:12,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T18:51:12,059 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T18:51:12,059 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T18:51:12,060 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T18:51:12,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T18:51:12,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T18:51:12,062 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/nfs.dump.dir in system properties and HBase conf 2024-11-24T18:51:12,063 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/java.io.tmpdir in system properties and HBase conf 2024-11-24T18:51:12,063 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T18:51:12,064 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T18:51:12,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T18:51:12,608 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T18:51:13,287 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-24T18:51:13,365 INFO [Time-limited test {}] log.Log(170): Logging initialized @2716ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-24T18:51:13,434 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:51:13,500 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T18:51:13,522 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T18:51:13,523 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T18:51:13,524 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T18:51:13,537 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:51:13,540 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/hadoop.log.dir/,AVAILABLE} 2024-11-24T18:51:13,541 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T18:51:13,731 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/java.io.tmpdir/jetty-localhost-36507-hadoop-hdfs-3_4_1-tests_jar-_-any-11176223343628306458/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T18:51:13,737 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:36507} 2024-11-24T18:51:13,738 INFO [Time-limited test {}] server.Server(415): Started @3090ms 2024-11-24T18:51:13,768 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T18:51:14,547 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:51:14,558 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T18:51:14,561 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T18:51:14,562 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T18:51:14,562 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T18:51:14,563 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/hadoop.log.dir/,AVAILABLE} 2024-11-24T18:51:14,564 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T18:51:14,710 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/java.io.tmpdir/jetty-localhost-34803-hadoop-hdfs-3_4_1-tests_jar-_-any-6243746791166052744/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:51:14,711 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:34803} 2024-11-24T18:51:14,711 INFO [Time-limited test {}] server.Server(415): Started @4064ms 2024-11-24T18:51:14,777 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T18:51:14,910 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:51:14,918 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T18:51:14,926 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T18:51:14,926 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T18:51:14,926 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T18:51:14,929 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/hadoop.log.dir/,AVAILABLE} 2024-11-24T18:51:14,930 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T18:51:15,074 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/java.io.tmpdir/jetty-localhost-45461-hadoop-hdfs-3_4_1-tests_jar-_-any-3850418058502265212/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:51:15,075 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:45461} 2024-11-24T18:51:15,075 INFO [Time-limited test {}] server.Server(415): Started @4427ms 2024-11-24T18:51:15,078 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-24T18:51:16,867 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/cluster_8fd6d39d-f1e7-d111-0b8b-0e415c4d9e0a/data/data4/current/BP-1886669259-172.17.0.2-1732474272711/current, will proceed with Du for space computation calculation, 2024-11-24T18:51:16,867 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/cluster_8fd6d39d-f1e7-d111-0b8b-0e415c4d9e0a/data/data3/current/BP-1886669259-172.17.0.2-1732474272711/current, will proceed with Du for space computation calculation, 2024-11-24T18:51:16,867 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/cluster_8fd6d39d-f1e7-d111-0b8b-0e415c4d9e0a/data/data2/current/BP-1886669259-172.17.0.2-1732474272711/current, will proceed with Du for space computation calculation, 2024-11-24T18:51:16,867 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/cluster_8fd6d39d-f1e7-d111-0b8b-0e415c4d9e0a/data/data1/current/BP-1886669259-172.17.0.2-1732474272711/current, will proceed with Du for space computation calculation, 2024-11-24T18:51:16,902 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T18:51:16,904 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T18:51:16,951 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8ba4250ceb1a3430 with lease ID 0x44c16a9b06904663: Processing first storage report for DS-274a7b26-166f-489a-96f9-afbae2c94bf1 from datanode DatanodeRegistration(127.0.0.1:46307, datanodeUuid=19d4d744-57ac-4c09-864c-5dfbf9012692, infoPort=43553, infoSecurePort=0, ipcPort=37703, storageInfo=lv=-57;cid=testClusterID;nsid=85570133;c=1732474272711) 2024-11-24T18:51:16,953 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8ba4250ceb1a3430 with lease ID 0x44c16a9b06904663: from storage DS-274a7b26-166f-489a-96f9-afbae2c94bf1 node DatanodeRegistration(127.0.0.1:46307, datanodeUuid=19d4d744-57ac-4c09-864c-5dfbf9012692, infoPort=43553, infoSecurePort=0, ipcPort=37703, storageInfo=lv=-57;cid=testClusterID;nsid=85570133;c=1732474272711), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T18:51:16,953 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3cde32d844309db4 with lease ID 0x44c16a9b06904662: Processing first storage report for DS-454ffbd1-f0ee-406e-8b8b-fd819823a078 from datanode DatanodeRegistration(127.0.0.1:42819, datanodeUuid=ad9e1b75-2c4c-4d0a-ae2c-b40b27af35c5, infoPort=46037, infoSecurePort=0, ipcPort=37433, storageInfo=lv=-57;cid=testClusterID;nsid=85570133;c=1732474272711) 2024-11-24T18:51:16,954 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3cde32d844309db4 with lease ID 0x44c16a9b06904662: from storage DS-454ffbd1-f0ee-406e-8b8b-fd819823a078 node DatanodeRegistration(127.0.0.1:42819, datanodeUuid=ad9e1b75-2c4c-4d0a-ae2c-b40b27af35c5, infoPort=46037, infoSecurePort=0, ipcPort=37433, storageInfo=lv=-57;cid=testClusterID;nsid=85570133;c=1732474272711), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:51:16,954 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8ba4250ceb1a3430 with lease ID 0x44c16a9b06904663: Processing first storage report for DS-3dbf2366-2b3d-4281-9a8a-f4c33a44cd6e from datanode DatanodeRegistration(127.0.0.1:46307, datanodeUuid=19d4d744-57ac-4c09-864c-5dfbf9012692, infoPort=43553, infoSecurePort=0, ipcPort=37703, storageInfo=lv=-57;cid=testClusterID;nsid=85570133;c=1732474272711) 2024-11-24T18:51:16,954 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8ba4250ceb1a3430 with lease ID 0x44c16a9b06904663: from storage DS-3dbf2366-2b3d-4281-9a8a-f4c33a44cd6e node DatanodeRegistration(127.0.0.1:46307, datanodeUuid=19d4d744-57ac-4c09-864c-5dfbf9012692, infoPort=43553, infoSecurePort=0, ipcPort=37703, storageInfo=lv=-57;cid=testClusterID;nsid=85570133;c=1732474272711), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:51:16,955 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3cde32d844309db4 with lease ID 0x44c16a9b06904662: Processing first storage report for DS-e91c77a7-556f-4f9c-a04a-d8c44b67bf33 from datanode DatanodeRegistration(127.0.0.1:42819, datanodeUuid=ad9e1b75-2c4c-4d0a-ae2c-b40b27af35c5, infoPort=46037, infoSecurePort=0, ipcPort=37433, storageInfo=lv=-57;cid=testClusterID;nsid=85570133;c=1732474272711) 2024-11-24T18:51:16,955 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3cde32d844309db4 
with lease ID 0x44c16a9b06904662: from storage DS-e91c77a7-556f-4f9c-a04a-d8c44b67bf33 node DatanodeRegistration(127.0.0.1:42819, datanodeUuid=ad9e1b75-2c4c-4d0a-ae2c-b40b27af35c5, infoPort=46037, infoSecurePort=0, ipcPort=37433, storageInfo=lv=-57;cid=testClusterID;nsid=85570133;c=1732474272711), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:51:17,034 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568 2024-11-24T18:51:17,104 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/cluster_8fd6d39d-f1e7-d111-0b8b-0e415c4d9e0a/zookeeper_0, clientPort=64439, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/cluster_8fd6d39d-f1e7-d111-0b8b-0e415c4d9e0a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/cluster_8fd6d39d-f1e7-d111-0b8b-0e415c4d9e0a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T18:51:17,116 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64439 2024-11-24T18:51:17,128 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:51:17,130 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:51:17,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741825_1001 (size=7) 2024-11-24T18:51:17,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741825_1001 (size=7) 2024-11-24T18:51:17,766 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2 with version=8 2024-11-24T18:51:17,766 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/hbase-staging 2024-11-24T18:51:17,869 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-24T18:51:18,118 INFO [Time-limited test {}] client.ConnectionUtils(128): master/f2b92657890a:0 server-side Connection retries=45 2024-11-24T18:51:18,130 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:51:18,131 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, 
maxQueueLength=30, handlerCount=3 2024-11-24T18:51:18,135 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T18:51:18,136 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:51:18,136 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T18:51:18,273 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T18:51:18,331 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-24T18:51:18,341 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-24T18:51:18,345 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T18:51:18,369 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 58693 (auto-detected) 2024-11-24T18:51:18,370 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-24T18:51:18,388 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44605 2024-11-24T18:51:18,408 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44605 connecting to ZooKeeper ensemble=127.0.0.1:64439 2024-11-24T18:51:18,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:446050x0, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T18:51:18,525 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44605-0x1016e2f888c0000 connected 2024-11-24T18:51:18,616 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:51:18,618 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:51:18,628 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T18:51:18,632 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2, hbase.cluster.distributed=false 2024-11-24T18:51:18,658 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T18:51:18,665 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44605 
2024-11-24T18:51:18,666 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44605 2024-11-24T18:51:18,666 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44605 2024-11-24T18:51:18,667 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44605 2024-11-24T18:51:18,667 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44605 2024-11-24T18:51:18,776 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/f2b92657890a:0 server-side Connection retries=45 2024-11-24T18:51:18,778 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:51:18,779 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T18:51:18,779 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T18:51:18,779 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:51:18,780 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T18:51:18,783 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T18:51:18,786 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T18:51:18,787 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33415 2024-11-24T18:51:18,789 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33415 connecting to ZooKeeper ensemble=127.0.0.1:64439 2024-11-24T18:51:18,791 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:51:18,796 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:51:18,814 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:334150x0, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T18:51:18,815 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:334150x0, quorum=127.0.0.1:64439, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T18:51:18,815 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:33415-0x1016e2f888c0001 connected 2024-11-24T18:51:18,820 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T18:51:18,829 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T18:51:18,833 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33415-0x1016e2f888c0001, quorum=127.0.0.1:64439, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T18:51:18,840 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33415-0x1016e2f888c0001, quorum=127.0.0.1:64439, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T18:51:18,841 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33415 2024-11-24T18:51:18,842 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33415 2024-11-24T18:51:18,844 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33415 2024-11-24T18:51:18,847 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33415 2024-11-24T18:51:18,847 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33415 2024-11-24T18:51:18,863 DEBUG [M:0;f2b92657890a:44605 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;f2b92657890a:44605 2024-11-24T18:51:18,864 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/f2b92657890a,44605,1732474277958 2024-11-24T18:51:18,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T18:51:18,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1016e2f888c0001, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T18:51:18,879 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/f2b92657890a,44605,1732474277958 2024-11-24T18:51:18,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1016e2f888c0001, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T18:51:18,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:51:18,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1016e2f888c0001, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:51:18,910 DEBUG 
[master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T18:51:18,911 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/f2b92657890a,44605,1732474277958 from backup master directory 2024-11-24T18:51:18,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1016e2f888c0001, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T18:51:18,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/f2b92657890a,44605,1732474277958 2024-11-24T18:51:18,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T18:51:18,926 WARN [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T18:51:18,926 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=f2b92657890a,44605,1732474277958 2024-11-24T18:51:18,928 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-24T18:51:18,929 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-24T18:51:18,977 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/hbase.id] with ID: e0995372-115f-4ff6-a6bb-7c9773ea57e4 2024-11-24T18:51:18,978 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/.tmp/hbase.id 2024-11-24T18:51:18,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741826_1002 (size=42) 2024-11-24T18:51:18,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741826_1002 (size=42) 2024-11-24T18:51:18,991 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/.tmp/hbase.id]:[hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/hbase.id] 2024-11-24T18:51:19,036 INFO [master/f2b92657890a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:51:19,041 INFO [master/f2b92657890a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-11-24T18:51:19,058 INFO [master/f2b92657890a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 16ms. 2024-11-24T18:51:19,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:51:19,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1016e2f888c0001, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:51:19,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741827_1003 (size=196) 2024-11-24T18:51:19,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741827_1003 (size=196) 2024-11-24T18:51:19,101 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T18:51:19,103 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T18:51:19,107 INFO [master/f2b92657890a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T18:51:19,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741828_1004 (size=1189) 2024-11-24T18:51:19,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741828_1004 (size=1189) 2024-11-24T18:51:19,556 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/MasterData/data/master/store 2024-11-24T18:51:19,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741829_1005 (size=34) 2024-11-24T18:51:19,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741829_1005 (size=34) 2024-11-24T18:51:19,580 INFO [master/f2b92657890a:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-24T18:51:19,584 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:51:19,586 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T18:51:19,586 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:51:19,587 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:51:19,589 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T18:51:19,589 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:51:19,589 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T18:51:19,591 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732474279586Disabling compacts and flushes for region at 1732474279586Disabling writes for close at 1732474279589 (+3 ms)Writing region close event to WAL at 1732474279589Closed at 1732474279589 2024-11-24T18:51:19,593 WARN [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/MasterData/data/master/store/.initializing 2024-11-24T18:51:19,593 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/MasterData/WALs/f2b92657890a,44605,1732474277958 2024-11-24T18:51:19,616 INFO [master/f2b92657890a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f2b92657890a%2C44605%2C1732474277958, suffix=, logDir=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/MasterData/WALs/f2b92657890a,44605,1732474277958, archiveDir=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/MasterData/oldWALs, maxLogs=10 2024-11-24T18:51:19,625 INFO [master/f2b92657890a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C44605%2C1732474277958.1732474279621 2024-11-24T18:51:19,642 INFO [master/f2b92657890a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/MasterData/WALs/f2b92657890a,44605,1732474277958/f2b92657890a%2C44605%2C1732474277958.1732474279621 2024-11-24T18:51:19,649 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46037:46037),(127.0.0.1/127.0.0.1:43553:43553)] 2024-11-24T18:51:19,652 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T18:51:19,652 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:51:19,655 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:51:19,656 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:51:19,694 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:51:19,716 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T18:51:19,719 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:51:19,721 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:51:19,721 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:51:19,725 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T18:51:19,725 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:51:19,726 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T18:51:19,726 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:51:19,729 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T18:51:19,730 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:51:19,731 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T18:51:19,731 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:51:19,734 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T18:51:19,734 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:51:19,735 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T18:51:19,735 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:51:19,739 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:51:19,740 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:51:19,745 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:51:19,745 DEBUG [master/f2b92657890a:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:51:19,748 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T18:51:19,752 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:51:19,757 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T18:51:19,759 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=858835, jitterRate=0.09206627309322357}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T18:51:19,768 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732474279667Initializing all the Stores at 1732474279669 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474279669Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474279670 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474279670Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474279670Cleaning up temporary data from old regions at 1732474279746 (+76 ms)Region opened successfully at 1732474279768 (+22 ms) 2024-11-24T18:51:19,770 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T18:51:19,804 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6344f200, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f2b92657890a/172.17.0.2:0 2024-11-24T18:51:19,837 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T18:51:19,846 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T18:51:19,846 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T18:51:19,849 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T18:51:19,850 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-24T18:51:19,854 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-24T18:51:19,855 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T18:51:19,878 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T18:51:19,886 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T18:51:19,940 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T18:51:19,946 INFO [master/f2b92657890a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T18:51:19,948 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T18:51:19,961 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T18:51:19,963 INFO [master/f2b92657890a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T18:51:19,967 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T18:51:19,977 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T18:51:19,979 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T18:51:19,988 DEBUG 
[master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T18:51:20,008 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T18:51:20,019 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T18:51:20,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T18:51:20,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1016e2f888c0001, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T18:51:20,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:51:20,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1016e2f888c0001, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:51:20,039 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=f2b92657890a,44605,1732474277958, sessionid=0x1016e2f888c0000, setting cluster-up flag (Was=false) 2024-11-24T18:51:20,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1016e2f888c0001, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:51:20,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:51:20,104 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T18:51:20,108 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f2b92657890a,44605,1732474277958 2024-11-24T18:51:20,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:51:20,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1016e2f888c0001, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:51:20,162 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T18:51:20,167 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f2b92657890a,44605,1732474277958 2024-11-24T18:51:20,177 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T18:51:20,247 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T18:51:20,252 INFO [RS:0;f2b92657890a:33415 {}] regionserver.HRegionServer(746): ClusterId : e0995372-115f-4ff6-a6bb-7c9773ea57e4 2024-11-24T18:51:20,254 DEBUG [RS:0;f2b92657890a:33415 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T18:51:20,258 INFO [master/f2b92657890a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T18:51:20,264 INFO [master/f2b92657890a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T18:51:20,269 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: f2b92657890a,44605,1732474277958 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T18:51:20,458 DEBUG [RS:0;f2b92657890a:33415 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T18:51:20,458 DEBUG [RS:0;f2b92657890a:33415 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T18:51:20,459 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/f2b92657890a:0, corePoolSize=5, maxPoolSize=5 2024-11-24T18:51:20,459 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/f2b92657890a:0, corePoolSize=5, maxPoolSize=5 2024-11-24T18:51:20,459 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/f2b92657890a:0, corePoolSize=5, maxPoolSize=5 2024-11-24T18:51:20,459 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/f2b92657890a:0, corePoolSize=5, maxPoolSize=5 2024-11-24T18:51:20,460 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/f2b92657890a:0, corePoolSize=10, maxPoolSize=10 2024-11-24T18:51:20,460 DEBUG 
[master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:51:20,460 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/f2b92657890a:0, corePoolSize=2, maxPoolSize=2 2024-11-24T18:51:20,460 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:51:20,462 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732474310462 2024-11-24T18:51:20,463 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T18:51:20,464 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T18:51:20,467 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T18:51:20,467 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T18:51:20,468 DEBUG [RS:0;f2b92657890a:33415 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T18:51:20,469 DEBUG [RS:0;f2b92657890a:33415 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f5edd0c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f2b92657890a/172.17.0.2:0 2024-11-24T18:51:20,469 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T18:51:20,470 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T18:51:20,470 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T18:51:20,470 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T18:51:20,471 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-24T18:51:20,475 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:51:20,475 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T18:51:20,475 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T18:51:20,476 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T18:51:20,477 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T18:51:20,479 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T18:51:20,480 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T18:51:20,481 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.large.0-1732474280481,5,FailOnTimeoutGroup] 2024-11-24T18:51:20,482 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.small.0-1732474280482,5,FailOnTimeoutGroup] 2024-11-24T18:51:20,482 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T18:51:20,482 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T18:51:20,483 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T18:51:20,484 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-24T18:51:20,486 DEBUG [RS:0;f2b92657890a:33415 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;f2b92657890a:33415 2024-11-24T18:51:20,489 INFO [RS:0;f2b92657890a:33415 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T18:51:20,489 INFO [RS:0;f2b92657890a:33415 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T18:51:20,489 DEBUG [RS:0;f2b92657890a:33415 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-24T18:51:20,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741831_1007 (size=1321) 2024-11-24T18:51:20,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741831_1007 (size=1321) 2024-11-24T18:51:20,492 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T18:51:20,492 INFO [RS:0;f2b92657890a:33415 {}] regionserver.HRegionServer(2659): reportForDuty to master=f2b92657890a,44605,1732474277958 with port=33415, startcode=1732474278738 2024-11-24T18:51:20,493 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, 
regionDir=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2 2024-11-24T18:51:20,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741832_1008 (size=32) 2024-11-24T18:51:20,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741832_1008 (size=32) 2024-11-24T18:51:20,505 DEBUG [RS:0;f2b92657890a:33415 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T18:51:20,505 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:51:20,507 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T18:51:20,510 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T18:51:20,510 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:51:20,511 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:51:20,511 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T18:51:20,513 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T18:51:20,513 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:51:20,514 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:51:20,514 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T18:51:20,517 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T18:51:20,517 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:51:20,518 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:51:20,518 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T18:51:20,520 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T18:51:20,520 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:51:20,521 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-24T18:51:20,522 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T18:51:20,523 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/hbase/meta/1588230740 2024-11-24T18:51:20,523 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/hbase/meta/1588230740 2024-11-24T18:51:20,526 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T18:51:20,526 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T18:51:20,527 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T18:51:20,529 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T18:51:20,532 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T18:51:20,533 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=782094, jitterRate=-0.00551643967628479}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T18:51:20,537 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732474280506Initializing all the Stores at 1732474280507 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474280507Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474280507Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474280507Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474280507Cleaning up temporary data from old regions at 1732474280526 (+19 ms)Region opened successfully at 1732474280537 (+11 
ms) 2024-11-24T18:51:20,537 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T18:51:20,537 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T18:51:20,537 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T18:51:20,537 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T18:51:20,537 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T18:51:20,538 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T18:51:20,539 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732474280537Disabling compacts and flushes for region at 1732474280537Disabling writes for close at 1732474280537Writing region close event to WAL at 1732474280538 (+1 ms)Closed at 1732474280538 2024-11-24T18:51:20,541 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T18:51:20,541 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T18:51:20,547 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T18:51:20,556 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T18:51:20,558 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T18:51:20,570 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38855, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T18:51:20,575 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44605 {}] master.ServerManager(363): Checking decommissioned status of RegionServer f2b92657890a,33415,1732474278738 2024-11-24T18:51:20,576 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44605 {}] master.ServerManager(517): Registering regionserver=f2b92657890a,33415,1732474278738 2024-11-24T18:51:20,589 DEBUG [RS:0;f2b92657890a:33415 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2 2024-11-24T18:51:20,589 DEBUG [RS:0;f2b92657890a:33415 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40437 2024-11-24T18:51:20,589 DEBUG [RS:0;f2b92657890a:33415 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T18:51:20,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T18:51:20,641 DEBUG [RS:0;f2b92657890a:33415 {}] zookeeper.ZKUtil(111): regionserver:33415-0x1016e2f888c0001, quorum=127.0.0.1:64439, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/f2b92657890a,33415,1732474278738 2024-11-24T18:51:20,641 WARN [RS:0;f2b92657890a:33415 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T18:51:20,642 INFO [RS:0;f2b92657890a:33415 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T18:51:20,642 DEBUG [RS:0;f2b92657890a:33415 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738 2024-11-24T18:51:20,644 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [f2b92657890a,33415,1732474278738] 2024-11-24T18:51:20,663 INFO [RS:0;f2b92657890a:33415 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T18:51:20,677 INFO [RS:0;f2b92657890a:33415 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T18:51:20,681 INFO [RS:0;f2b92657890a:33415 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T18:51:20,682 INFO [RS:0;f2b92657890a:33415 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T18:51:20,683 INFO [RS:0;f2b92657890a:33415 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T18:51:20,687 INFO [RS:0;f2b92657890a:33415 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T18:51:20,689 INFO [RS:0;f2b92657890a:33415 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-24T18:51:20,689 DEBUG [RS:0;f2b92657890a:33415 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:51:20,689 DEBUG [RS:0;f2b92657890a:33415 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:51:20,689 DEBUG [RS:0;f2b92657890a:33415 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:51:20,689 DEBUG [RS:0;f2b92657890a:33415 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:51:20,689 DEBUG [RS:0;f2b92657890a:33415 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:51:20,690 DEBUG [RS:0;f2b92657890a:33415 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/f2b92657890a:0, corePoolSize=2, maxPoolSize=2 2024-11-24T18:51:20,690 DEBUG [RS:0;f2b92657890a:33415 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:51:20,690 DEBUG [RS:0;f2b92657890a:33415 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:51:20,690 DEBUG [RS:0;f2b92657890a:33415 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:51:20,690 DEBUG [RS:0;f2b92657890a:33415 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:51:20,690 DEBUG [RS:0;f2b92657890a:33415 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:51:20,690 DEBUG [RS:0;f2b92657890a:33415 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:51:20,690 DEBUG [RS:0;f2b92657890a:33415 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/f2b92657890a:0, corePoolSize=3, maxPoolSize=3 2024-11-24T18:51:20,691 DEBUG [RS:0;f2b92657890a:33415 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0, corePoolSize=3, maxPoolSize=3 2024-11-24T18:51:20,691 INFO [RS:0;f2b92657890a:33415 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T18:51:20,692 INFO [RS:0;f2b92657890a:33415 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T18:51:20,692 INFO [RS:0;f2b92657890a:33415 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T18:51:20,692 INFO [RS:0;f2b92657890a:33415 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-24T18:51:20,692 INFO [RS:0;f2b92657890a:33415 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T18:51:20,692 INFO [RS:0;f2b92657890a:33415 {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,33415,1732474278738-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T18:51:20,708 INFO [RS:0;f2b92657890a:33415 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T18:51:20,709 WARN [f2b92657890a:44605 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-24T18:51:20,710 INFO [RS:0;f2b92657890a:33415 {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,33415,1732474278738-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T18:51:20,710 INFO [RS:0;f2b92657890a:33415 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:51:20,710 INFO [RS:0;f2b92657890a:33415 {}] regionserver.Replication(171): f2b92657890a,33415,1732474278738 started 2024-11-24T18:51:20,726 INFO [RS:0;f2b92657890a:33415 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:51:20,727 INFO [RS:0;f2b92657890a:33415 {}] regionserver.HRegionServer(1482): Serving as f2b92657890a,33415,1732474278738, RpcServer on f2b92657890a/172.17.0.2:33415, sessionid=0x1016e2f888c0001 2024-11-24T18:51:20,727 DEBUG [RS:0;f2b92657890a:33415 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T18:51:20,727 DEBUG [RS:0;f2b92657890a:33415 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager f2b92657890a,33415,1732474278738 2024-11-24T18:51:20,727 DEBUG [RS:0;f2b92657890a:33415 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f2b92657890a,33415,1732474278738' 2024-11-24T18:51:20,728 DEBUG [RS:0;f2b92657890a:33415 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T18:51:20,729 DEBUG [RS:0;f2b92657890a:33415 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T18:51:20,729 DEBUG [RS:0;f2b92657890a:33415 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T18:51:20,729 DEBUG [RS:0;f2b92657890a:33415 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T18:51:20,730 DEBUG [RS:0;f2b92657890a:33415 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager f2b92657890a,33415,1732474278738 2024-11-24T18:51:20,730 DEBUG [RS:0;f2b92657890a:33415 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f2b92657890a,33415,1732474278738' 2024-11-24T18:51:20,730 DEBUG [RS:0;f2b92657890a:33415 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T18:51:20,730 DEBUG [RS:0;f2b92657890a:33415 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T18:51:20,731 DEBUG [RS:0;f2b92657890a:33415 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T18:51:20,731 INFO [RS:0;f2b92657890a:33415 {}] 
quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T18:51:20,731 INFO [RS:0;f2b92657890a:33415 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T18:51:20,841 INFO [RS:0;f2b92657890a:33415 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f2b92657890a%2C33415%2C1732474278738, suffix=, logDir=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738, archiveDir=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/oldWALs, maxLogs=32 2024-11-24T18:51:20,845 INFO [RS:0;f2b92657890a:33415 {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C33415%2C1732474278738.1732474280844 2024-11-24T18:51:20,853 INFO [RS:0;f2b92657890a:33415 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474280844 2024-11-24T18:51:20,855 DEBUG [RS:0;f2b92657890a:33415 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46037:46037),(127.0.0.1/127.0.0.1:43553:43553)] 2024-11-24T18:51:20,962 DEBUG [f2b92657890a:44605 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T18:51:20,976 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=f2b92657890a,33415,1732474278738 2024-11-24T18:51:20,982 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f2b92657890a,33415,1732474278738, state=OPENING 2024-11-24T18:51:21,098 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T18:51:21,108 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:51:21,108 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1016e2f888c0001, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:51:21,109 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T18:51:21,109 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T18:51:21,111 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T18:51:21,112 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=f2b92657890a,33415,1732474278738}] 2024-11-24T18:51:21,287 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T18:51:21,290 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:56133, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T18:51:21,301 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T18:51:21,302 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T18:51:21,305 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f2b92657890a%2C33415%2C1732474278738.meta, suffix=.meta, logDir=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738, archiveDir=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/oldWALs, maxLogs=32 2024-11-24T18:51:21,307 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C33415%2C1732474278738.meta.1732474281307.meta 2024-11-24T18:51:21,315 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.meta.1732474281307.meta 2024-11-24T18:51:21,316 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43553:43553),(127.0.0.1/127.0.0.1:46037:46037)] 2024-11-24T18:51:21,317 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T18:51:21,318 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T18:51:21,321 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T18:51:21,325 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-24T18:51:21,329 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T18:51:21,329 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:51:21,329 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T18:51:21,329 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T18:51:21,332 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T18:51:21,334 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T18:51:21,334 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:51:21,335 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:51:21,335 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T18:51:21,337 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T18:51:21,337 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:51:21,338 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:51:21,338 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T18:51:21,340 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T18:51:21,340 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:51:21,341 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:51:21,341 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T18:51:21,342 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T18:51:21,343 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:51:21,343 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-24T18:51:21,344 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T18:51:21,345 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/hbase/meta/1588230740 2024-11-24T18:51:21,347 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/hbase/meta/1588230740 2024-11-24T18:51:21,350 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T18:51:21,350 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T18:51:21,351 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T18:51:21,354 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T18:51:21,356 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=804819, jitterRate=0.023380905389785767}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T18:51:21,356 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T18:51:21,357 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732474281330Writing region info on filesystem at 1732474281330Initializing all the Stores at 1732474281332 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474281332Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474281332Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474281332Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474281332Cleaning up temporary data from old regions at 1732474281350 (+18 ms)Running coprocessor post-open hooks at 1732474281356 (+6 ms)Region opened successfully at 1732474281357 (+1 ms) 2024-11-24T18:51:21,363 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732474281279 2024-11-24T18:51:21,374 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T18:51:21,375 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T18:51:21,377 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=f2b92657890a,33415,1732474278738 2024-11-24T18:51:21,380 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f2b92657890a,33415,1732474278738, state=OPEN 2024-11-24T18:51:21,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1016e2f888c0001, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T18:51:21,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T18:51:21,561 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T18:51:21,561 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T18:51:21,562 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=f2b92657890a,33415,1732474278738 2024-11-24T18:51:21,568 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T18:51:21,568 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=f2b92657890a,33415,1732474278738 in 450 msec 2024-11-24T18:51:21,574 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T18:51:21,575 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.0230 sec 2024-11-24T18:51:21,576 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T18:51:21,576 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T18:51:21,596 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T18:51:21,597 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f2b92657890a,33415,1732474278738, seqNum=-1] 2024-11-24T18:51:21,622 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T18:51:21,624 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50589, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T18:51:21,648 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.4400 sec 2024-11-24T18:51:21,649 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732474281648, completionTime=-1 2024-11-24T18:51:21,652 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T18:51:21,652 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T18:51:21,685 INFO [master/f2b92657890a:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T18:51:21,685 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732474341685 2024-11-24T18:51:21,685 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732474401685 2024-11-24T18:51:21,685 INFO [master/f2b92657890a:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 32 msec 2024-11-24T18:51:21,689 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,44605,1732474277958-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T18:51:21,689 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,44605,1732474277958-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:51:21,690 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,44605,1732474277958-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:51:21,692 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-f2b92657890a:44605, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T18:51:21,692 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T18:51:21,693 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T18:51:21,699 DEBUG [master/f2b92657890a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T18:51:21,727 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.801sec 2024-11-24T18:51:21,729 INFO [master/f2b92657890a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T18:51:21,730 INFO [master/f2b92657890a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T18:51:21,731 INFO [master/f2b92657890a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T18:51:21,731 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T18:51:21,731 INFO [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T18:51:21,732 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,44605,1732474277958-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T18:51:21,733 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,44605,1732474277958-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T18:51:21,742 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T18:51:21,744 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T18:51:21,744 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,44605,1732474277958-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T18:51:21,783 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@265d957d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T18:51:21,785 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-24T18:51:21,785 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-24T18:51:21,789 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request f2b92657890a,44605,-1 for getting cluster id 2024-11-24T18:51:21,792 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T18:51:21,809 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e0995372-115f-4ff6-a6bb-7c9773ea57e4' 2024-11-24T18:51:21,813 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T18:51:21,813 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e0995372-115f-4ff6-a6bb-7c9773ea57e4" 2024-11-24T18:51:21,814 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5956a3e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T18:51:21,814 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f2b92657890a,44605,-1] 2024-11-24T18:51:21,817 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T18:51:21,822 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:51:21,824 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55582, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T18:51:21,827 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2aef31c4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T18:51:21,827 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T18:51:21,835 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f2b92657890a,33415,1732474278738, seqNum=-1] 2024-11-24T18:51:21,835 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T18:51:21,838 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44030, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T18:51:21,864 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=f2b92657890a,44605,1732474277958 2024-11-24T18:51:21,864 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:51:21,873 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T18:51:21,878 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T18:51:21,883 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is f2b92657890a,44605,1732474277958 2024-11-24T18:51:21,885 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5fea0898 2024-11-24T18:51:21,886 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T18:51:21,889 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34576, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T18:51:21,892 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44605 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-24T18:51:21,892 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44605 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
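
The two TableDescriptorChecker warnings above come from the test creating its table with a deliberately tiny MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes) so that splits and flushes happen quickly. Below is a minimal, illustrative sketch of building such a descriptor with the standard HBase client API; only the table name, family name, and the two byte values come from the log lines above, everything else (configuration, connection handling, class name) is an assumption and not taken from this run.

  // Illustrative sketch, not part of the captured run: create a table whose small
  // max file size and memstore flush size reproduce the two warnings above.
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.util.Bytes;

  public class CreateSmallFlushTable {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(1)           // VERSIONS => '1', as in the descriptor logged below
                .build())
            .setMaxFileSize(786432L)         // triggers the MAX_FILESIZE "too small" warning
            .setMemStoreFlushSize(8192L)     // triggers the MEMSTORE_FLUSHSIZE "too small" warning
            .build();
        admin.createTable(td);
      }
    }
  }
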
2024-11-24T18:51:21,895 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44605 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T18:51:21,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44605 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-24T18:51:21,906 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T18:51:21,909 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44605 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-24T18:51:21,909 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:51:21,912 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T18:51:21,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44605 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T18:51:21,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741835_1011 (size=389) 2024-11-24T18:51:21,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741835_1011 (size=389) 2024-11-24T18:51:21,960 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6368e8824a29bcabcc8580c2132b9082, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2 2024-11-24T18:51:21,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741836_1012 (size=72) 2024-11-24T18:51:21,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741836_1012 (size=72) 2024-11-24T18:51:21,970 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:51:21,970 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 6368e8824a29bcabcc8580c2132b9082, disabling compactions & flushes 2024-11-24T18:51:21,971 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082. 2024-11-24T18:51:21,971 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082. 2024-11-24T18:51:21,971 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082. after waiting 0 ms 2024-11-24T18:51:21,971 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082. 2024-11-24T18:51:21,971 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082. 2024-11-24T18:51:21,971 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6368e8824a29bcabcc8580c2132b9082: Waiting for close lock at 1732474281970Disabling compacts and flushes for region at 1732474281970Disabling writes for close at 1732474281971 (+1 ms)Writing region close event to WAL at 1732474281971Closed at 1732474281971 2024-11-24T18:51:21,973 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T18:51:21,978 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1732474281973"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732474281973"}]},"ts":"1732474281973"} 2024-11-24T18:51:21,984 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-24T18:51:21,986 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T18:51:21,989 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732474281986"}]},"ts":"1732474281986"} 2024-11-24T18:51:21,994 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-24T18:51:21,997 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=6368e8824a29bcabcc8580c2132b9082, ASSIGN}] 2024-11-24T18:51:22,000 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=6368e8824a29bcabcc8580c2132b9082, ASSIGN 2024-11-24T18:51:22,002 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=6368e8824a29bcabcc8580c2132b9082, ASSIGN; state=OFFLINE, location=f2b92657890a,33415,1732474278738; forceNewPlan=false, retain=false 2024-11-24T18:51:22,154 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6368e8824a29bcabcc8580c2132b9082, regionState=OPENING, regionLocation=f2b92657890a,33415,1732474278738 2024-11-24T18:51:22,159 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=6368e8824a29bcabcc8580c2132b9082, ASSIGN because future has completed 2024-11-24T18:51:22,160 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6368e8824a29bcabcc8580c2132b9082, server=f2b92657890a,33415,1732474278738}] 2024-11-24T18:51:22,322 INFO [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082. 
2024-11-24T18:51:22,322 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 6368e8824a29bcabcc8580c2132b9082, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082.', STARTKEY => '', ENDKEY => ''} 2024-11-24T18:51:22,323 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 6368e8824a29bcabcc8580c2132b9082 2024-11-24T18:51:22,323 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:51:22,323 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 6368e8824a29bcabcc8580c2132b9082 2024-11-24T18:51:22,323 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 6368e8824a29bcabcc8580c2132b9082 2024-11-24T18:51:22,326 INFO [StoreOpener-6368e8824a29bcabcc8580c2132b9082-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 6368e8824a29bcabcc8580c2132b9082 2024-11-24T18:51:22,328 INFO [StoreOpener-6368e8824a29bcabcc8580c2132b9082-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6368e8824a29bcabcc8580c2132b9082 columnFamilyName info 2024-11-24T18:51:22,328 DEBUG [StoreOpener-6368e8824a29bcabcc8580c2132b9082-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:51:22,330 INFO [StoreOpener-6368e8824a29bcabcc8580c2132b9082-1 {}] regionserver.HStore(327): Store=6368e8824a29bcabcc8580c2132b9082/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T18:51:22,330 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 6368e8824a29bcabcc8580c2132b9082 2024-11-24T18:51:22,331 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082 2024-11-24T18:51:22,332 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082 2024-11-24T18:51:22,333 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 6368e8824a29bcabcc8580c2132b9082 2024-11-24T18:51:22,333 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 6368e8824a29bcabcc8580c2132b9082 2024-11-24T18:51:22,336 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 6368e8824a29bcabcc8580c2132b9082 2024-11-24T18:51:22,340 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T18:51:22,341 INFO [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 6368e8824a29bcabcc8580c2132b9082; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=708766, jitterRate=-0.09875757992267609}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T18:51:22,341 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6368e8824a29bcabcc8580c2132b9082 2024-11-24T18:51:22,342 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 6368e8824a29bcabcc8580c2132b9082: Running coprocessor pre-open hook at 1732474282323Writing region info on filesystem at 1732474282323Initializing all the Stores at 1732474282325 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474282325Cleaning up temporary data from old regions at 1732474282333 (+8 ms)Running coprocessor post-open hooks at 1732474282341 (+8 ms)Region opened successfully at 1732474282342 (+1 ms) 2024-11-24T18:51:22,344 INFO [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082., pid=6, masterSystemTime=1732474282315 2024-11-24T18:51:22,348 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082. 2024-11-24T18:51:22,348 INFO [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082. 2024-11-24T18:51:22,349 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6368e8824a29bcabcc8580c2132b9082, regionState=OPEN, openSeqNum=2, regionLocation=f2b92657890a,33415,1732474278738 2024-11-24T18:51:22,354 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6368e8824a29bcabcc8580c2132b9082, server=f2b92657890a,33415,1732474278738 because future has completed 2024-11-24T18:51:22,360 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T18:51:22,360 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 6368e8824a29bcabcc8580c2132b9082, server=f2b92657890a,33415,1732474278738 in 196 msec 2024-11-24T18:51:22,364 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T18:51:22,365 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=6368e8824a29bcabcc8580c2132b9082, ASSIGN in 364 msec 2024-11-24T18:51:22,366 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T18:51:22,366 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732474282366"}]},"ts":"1732474282366"} 2024-11-24T18:51:22,370 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-24T18:51:22,372 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T18:51:22,376 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 474 msec 2024-11-24T18:51:26,848 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-24T18:51:26,908 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T18:51:26,909 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-24T18:51:28,328 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T18:51:28,329 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-24T18:51:28,331 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-24T18:51:28,331 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-24T18:51:28,333 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T18:51:28,333 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-24T18:51:28,333 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-24T18:51:28,333 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-24T18:51:31,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44605 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T18:51:31,933 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-24T18:51:31,936 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-24T18:51:31,943 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-24T18:51:31,944 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082. 
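
The ClientMetaTableAccessor scan above is how the test utility discovers that the new table has exactly one region and where it is hosted. A hedged sketch of the equivalent client-side lookup with RegionLocator follows; only the table name comes from the log, the connection setup and class name are illustrative assumptions.

  // Illustrative sketch: list the region(s) of the table the way a client would,
  // mirroring the "Found 1 regions" / "firstRegionName=..." lines above.
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.HRegionLocation;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.RegionLocator;

  public class ListTableRegions {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      TableName table = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
      try (Connection conn = ConnectionFactory.createConnection(conf);
           RegionLocator locator = conn.getRegionLocator(table)) {
        for (HRegionLocation loc : locator.getAllRegionLocations()) {
          // prints the encoded region name and hosting server, comparable to the log above
          System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
        }
      }
    }
  }
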
2024-11-24T18:51:31,945 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C33415%2C1732474278738.1732474291944 2024-11-24T18:51:31,998 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:51:31,998 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:51:31,998 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:51:31,998 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:51:31,999 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:51:31,999 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474280844 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474291944 2024-11-24T18:51:32,001 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46037:46037),(127.0.0.1/127.0.0.1:43553:43553)] 2024-11-24T18:51:32,001 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474280844 is not closed yet, will try archiving it next time 2024-11-24T18:51:32,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741833_1009 (size=451) 2024-11-24T18:51:32,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741833_1009 (size=451) 2024-11-24T18:51:32,005 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474280844 to hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/oldWALs/f2b92657890a%2C33415%2C1732474278738.1732474280844 2024-11-24T18:51:32,010 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082., hostname=f2b92657890a,33415,1732474278738, seqNum=2] 2024-11-24T18:51:44,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33415 {}] regionserver.HRegion(8855): Flush requested on 6368e8824a29bcabcc8580c2132b9082 2024-11-24T18:51:44,060 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6368e8824a29bcabcc8580c2132b9082 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T18:51:44,119 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/.tmp/info/f632f31eb6fb4b84b9859ae9a61d7446 is 1080, key is row0001/info:/1732474292013/Put/seqid=0 2024-11-24T18:51:44,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741838_1014 (size=12509) 2024-11-24T18:51:44,131 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741838_1014 (size=12509) 2024-11-24T18:51:44,132 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/.tmp/info/f632f31eb6fb4b84b9859ae9a61d7446 2024-11-24T18:51:44,185 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/.tmp/info/f632f31eb6fb4b84b9859ae9a61d7446 as hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/info/f632f31eb6fb4b84b9859ae9a61d7446 2024-11-24T18:51:44,196 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/info/f632f31eb6fb4b84b9859ae9a61d7446, entries=7, sequenceid=11, filesize=12.2 K 2024-11-24T18:51:44,203 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 6368e8824a29bcabcc8580c2132b9082 in 145ms, sequenceid=11, compaction requested=false 2024-11-24T18:51:44,204 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6368e8824a29bcabcc8580c2132b9082: 2024-11-24T18:51:47,031 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-24T18:51:52,079 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C33415%2C1732474278738.1732474312078 2024-11-24T18:51:52,297 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 213 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42819,DS-454ffbd1-f0ee-406e-8b8b-fd819823a078,DISK], DatanodeInfoWithStorage[127.0.0.1:46307,DS-274a7b26-166f-489a-96f9-afbae2c94bf1,DISK]] 2024-11-24T18:51:52,297 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:51:52,298 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:51:52,298 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:51:52,298 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:51:52,299 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:51:52,299 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474291944 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474312078 2024-11-24T18:51:52,300 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46037:46037),(127.0.0.1/127.0.0.1:43553:43553)] 2024-11-24T18:51:52,300 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474291944 is not closed yet, will try archiving it next time 2024-11-24T18:51:52,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741837_1013 (size=12399) 2024-11-24T18:51:52,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741837_1013 (size=12399) 2024-11-24T18:51:52,505 INFO [FSHLog-0-hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2-prefix:f2b92657890a,33415,1732474278738 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42819,DS-454ffbd1-f0ee-406e-8b8b-fd819823a078,DISK], DatanodeInfoWithStorage[127.0.0.1:46307,DS-274a7b26-166f-489a-96f9-afbae2c94bf1,DISK]] 2024-11-24T18:51:54,711 INFO [FSHLog-0-hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2-prefix:f2b92657890a,33415,1732474278738 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42819,DS-454ffbd1-f0ee-406e-8b8b-fd819823a078,DISK], DatanodeInfoWithStorage[127.0.0.1:46307,DS-274a7b26-166f-489a-96f9-afbae2c94bf1,DISK]] 2024-11-24T18:51:56,918 INFO [FSHLog-0-hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2-prefix:f2b92657890a,33415,1732474278738 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42819,DS-454ffbd1-f0ee-406e-8b8b-fd819823a078,DISK], DatanodeInfoWithStorage[127.0.0.1:46307,DS-274a7b26-166f-489a-96f9-afbae2c94bf1,DISK]] 2024-11-24T18:51:59,124 INFO [FSHLog-0-hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2-prefix:f2b92657890a,33415,1732474278738 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42819,DS-454ffbd1-f0ee-406e-8b8b-fd819823a078,DISK], DatanodeInfoWithStorage[127.0.0.1:46307,DS-274a7b26-166f-489a-96f9-afbae2c94bf1,DISK]] 2024-11-24T18:51:59,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33415 {}] regionserver.HRegion(8855): Flush requested on 6368e8824a29bcabcc8580c2132b9082 2024-11-24T18:51:59,124 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6368e8824a29bcabcc8580c2132b9082 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T18:51:59,328 INFO [FSHLog-0-hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2-prefix:f2b92657890a,33415,1732474278738 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42819,DS-454ffbd1-f0ee-406e-8b8b-fd819823a078,DISK], DatanodeInfoWithStorage[127.0.0.1:46307,DS-274a7b26-166f-489a-96f9-afbae2c94bf1,DISK]] 2024-11-24T18:51:59,340 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/.tmp/info/5cf7cb1f35d0401d9ad0cc0fa2016f3d is 1080, key is row0008/info:/1732474306060/Put/seqid=0 2024-11-24T18:51:59,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741840_1016 (size=12509) 2024-11-24T18:51:59,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741840_1016 (size=12509) 2024-11-24T18:51:59,349 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/.tmp/info/5cf7cb1f35d0401d9ad0cc0fa2016f3d 2024-11-24T18:51:59,362 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/.tmp/info/5cf7cb1f35d0401d9ad0cc0fa2016f3d as hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/info/5cf7cb1f35d0401d9ad0cc0fa2016f3d 2024-11-24T18:51:59,373 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/info/5cf7cb1f35d0401d9ad0cc0fa2016f3d, entries=7, sequenceid=21, filesize=12.2 K 2024-11-24T18:51:59,575 INFO [FSHLog-0-hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2-prefix:f2b92657890a,33415,1732474278738 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42819,DS-454ffbd1-f0ee-406e-8b8b-fd819823a078,DISK], DatanodeInfoWithStorage[127.0.0.1:46307,DS-274a7b26-166f-489a-96f9-afbae2c94bf1,DISK]] 2024-11-24T18:51:59,576 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 6368e8824a29bcabcc8580c2132b9082 in 
451ms, sequenceid=21, compaction requested=false 2024-11-24T18:51:59,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6368e8824a29bcabcc8580c2132b9082: 2024-11-24T18:51:59,577 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-24T18:51:59,577 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T18:51:59,578 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/info/f632f31eb6fb4b84b9859ae9a61d7446 because midkey is the same as first or last row 2024-11-24T18:52:01,361 INFO [FSHLog-0-hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2-prefix:f2b92657890a,33415,1732474278738 {}] wal.AbstractFSWAL(1368): Slow sync cost: 231 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42819,DS-454ffbd1-f0ee-406e-8b8b-fd819823a078,DISK], DatanodeInfoWithStorage[127.0.0.1:46307,DS-274a7b26-166f-489a-96f9-afbae2c94bf1,DISK]] 2024-11-24T18:52:02,261 INFO [master/f2b92657890a:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-24T18:52:02,261 INFO [master/f2b92657890a:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-24T18:52:03,569 INFO [FSHLog-0-hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2-prefix:f2b92657890a,33415,1732474278738 {}] wal.AbstractFSWAL(1368): Slow sync cost: 203 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42819,DS-454ffbd1-f0ee-406e-8b8b-fd819823a078,DISK], DatanodeInfoWithStorage[127.0.0.1:46307,DS-274a7b26-166f-489a-96f9-afbae2c94bf1,DISK]] 2024-11-24T18:52:03,574 WARN [FSHLog-0-hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2-prefix:f2b92657890a,33415,1732474278738 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42819,DS-454ffbd1-f0ee-406e-8b8b-fd819823a078,DISK], DatanodeInfoWithStorage[127.0.0.1:46307,DS-274a7b26-166f-489a-96f9-afbae2c94bf1,DISK]] 2024-11-24T18:52:03,576 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f2b92657890a%2C33415%2C1732474278738:(num 1732474312078) roll requested 2024-11-24T18:52:03,577 INFO [regionserver/f2b92657890a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C33415%2C1732474278738.1732474323577 2024-11-24T18:52:03,793 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 212 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42819,DS-454ffbd1-f0ee-406e-8b8b-fd819823a078,DISK], DatanodeInfoWithStorage[127.0.0.1:46307,DS-274a7b26-166f-489a-96f9-afbae2c94bf1,DISK]] 2024-11-24T18:52:03,793 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:03,793 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:03,794 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:03,794 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:03,794 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-11-24T18:52:03,794 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474312078 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474323577 2024-11-24T18:52:03,796 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46037:46037),(127.0.0.1/127.0.0.1:43553:43553)] 2024-11-24T18:52:03,796 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474312078 is not closed yet, will try archiving it next time 2024-11-24T18:52:03,796 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474291944 to hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/oldWALs/f2b92657890a%2C33415%2C1732474278738.1732474291944 2024-11-24T18:52:03,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741839_1015 (size=7739) 2024-11-24T18:52:03,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741839_1015 (size=7739) 2024-11-24T18:52:05,776 INFO [FSHLog-0-hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2-prefix:f2b92657890a,33415,1732474278738 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42819,DS-454ffbd1-f0ee-406e-8b8b-fd819823a078,DISK], DatanodeInfoWithStorage[127.0.0.1:46307,DS-274a7b26-166f-489a-96f9-afbae2c94bf1,DISK]] 2024-11-24T18:52:07,324 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 6368e8824a29bcabcc8580c2132b9082, had cached 0 bytes from a total of 25018 2024-11-24T18:52:07,984 INFO [FSHLog-0-hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2-prefix:f2b92657890a,33415,1732474278738 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42819,DS-454ffbd1-f0ee-406e-8b8b-fd819823a078,DISK], DatanodeInfoWithStorage[127.0.0.1:46307,DS-274a7b26-166f-489a-96f9-afbae2c94bf1,DISK]] 2024-11-24T18:52:10,188 INFO [FSHLog-0-hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2-prefix:f2b92657890a,33415,1732474278738 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42819,DS-454ffbd1-f0ee-406e-8b8b-fd819823a078,DISK], DatanodeInfoWithStorage[127.0.0.1:46307,DS-274a7b26-166f-489a-96f9-afbae2c94bf1,DISK]] 2024-11-24T18:52:12,393 INFO [FSHLog-0-hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2-prefix:f2b92657890a,33415,1732474278738 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42819,DS-454ffbd1-f0ee-406e-8b8b-fd819823a078,DISK], 
DatanodeInfoWithStorage[127.0.0.1:46307,DS-274a7b26-166f-489a-96f9-afbae2c94bf1,DISK]] 2024-11-24T18:52:14,396 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-24T18:52:14,397 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C33415%2C1732474278738.1732474334397 2024-11-24T18:52:17,032 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T18:52:19,416 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5013 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42819,DS-454ffbd1-f0ee-406e-8b8b-fd819823a078,DISK], DatanodeInfoWithStorage[127.0.0.1:46307,DS-274a7b26-166f-489a-96f9-afbae2c94bf1,DISK]] 2024-11-24T18:52:19,418 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5013 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42819,DS-454ffbd1-f0ee-406e-8b8b-fd819823a078,DISK], DatanodeInfoWithStorage[127.0.0.1:46307,DS-274a7b26-166f-489a-96f9-afbae2c94bf1,DISK]] 2024-11-24T18:52:19,419 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f2b92657890a%2C33415%2C1732474278738:(num 1732474334397) roll requested 2024-11-24T18:52:19,419 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:19,419 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:19,419 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:19,419 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:19,420 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:19,420 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474323577 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474334397 2024-11-24T18:52:19,421 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43553:43553),(127.0.0.1/127.0.0.1:46037:46037)] 2024-11-24T18:52:19,421 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474323577 is not closed yet, will try archiving it next time 2024-11-24T18:52:19,422 INFO [regionserver/f2b92657890a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C33415%2C1732474278738.1732474339422 2024-11-24T18:52:19,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741841_1017 (size=4753) 2024-11-24T18:52:19,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741841_1017 (size=4753) 2024-11-24T18:52:24,486 INFO [FSHLog-0-hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2-prefix:f2b92657890a,33415,1732474278738 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5062 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:46307,DS-274a7b26-166f-489a-96f9-afbae2c94bf1,DISK], DatanodeInfoWithStorage[127.0.0.1:42819,DS-454ffbd1-f0ee-406e-8b8b-fd819823a078,DISK]] 2024-11-24T18:52:24,487 WARN [FSHLog-0-hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2-prefix:f2b92657890a,33415,1732474278738 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5062 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46307,DS-274a7b26-166f-489a-96f9-afbae2c94bf1,DISK], DatanodeInfoWithStorage[127.0.0.1:42819,DS-454ffbd1-f0ee-406e-8b8b-fd819823a078,DISK]] 2024-11-24T18:52:24,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33415 {}] regionserver.HRegion(8855): Flush requested on 6368e8824a29bcabcc8580c2132b9082 2024-11-24T18:52:24,487 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6368e8824a29bcabcc8580c2132b9082 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T18:52:24,493 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5067 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46307,DS-274a7b26-166f-489a-96f9-afbae2c94bf1,DISK], DatanodeInfoWithStorage[127.0.0.1:42819,DS-454ffbd1-f0ee-406e-8b8b-fd819823a078,DISK]] 2024-11-24T18:52:24,493 WARN [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5067 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46307,DS-274a7b26-166f-489a-96f9-afbae2c94bf1,DISK], DatanodeInfoWithStorage[127.0.0.1:42819,DS-454ffbd1-f0ee-406e-8b8b-fd819823a078,DISK]] 2024-11-24T18:52:26,488 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-24T18:52:29,546 INFO [FSHLog-0-hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2-prefix:f2b92657890a,33415,1732474278738 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5057 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46307,DS-274a7b26-166f-489a-96f9-afbae2c94bf1,DISK], DatanodeInfoWithStorage[127.0.0.1:42819,DS-454ffbd1-f0ee-406e-8b8b-fd819823a078,DISK]] 2024-11-24T18:52:29,547 WARN [FSHLog-0-hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2-prefix:f2b92657890a,33415,1732474278738 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5057 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46307,DS-274a7b26-166f-489a-96f9-afbae2c94bf1,DISK], DatanodeInfoWithStorage[127.0.0.1:42819,DS-454ffbd1-f0ee-406e-8b8b-fd819823a078,DISK]] 2024-11-24T18:52:29,547 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:29,548 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:29,548 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:29,549 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:29,549 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:29,550 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474334397 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474339422 2024-11-24T18:52:29,552 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46037:46037),(127.0.0.1/127.0.0.1:43553:43553)] 2024-11-24T18:52:29,552 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474334397 is not closed yet, will try archiving it next time 2024-11-24T18:52:29,552 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f2b92657890a%2C33415%2C1732474278738:(num 1732474339422) roll requested 2024-11-24T18:52:29,553 INFO [regionserver/f2b92657890a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C33415%2C1732474278738.1732474349553 2024-11-24T18:52:29,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741842_1018 (size=1569) 2024-11-24T18:52:29,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741842_1018 (size=1569) 2024-11-24T18:52:29,556 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/.tmp/info/9af0574f5e604c989669d08583c78be8 is 1080, key is row0015/info:/1732474321128/Put/seqid=0 2024-11-24T18:52:29,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741844_1020 (size=12509) 2024-11-24T18:52:29,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741844_1020 (size=12509) 2024-11-24T18:52:29,566 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/.tmp/info/9af0574f5e604c989669d08583c78be8 2024-11-24T18:52:29,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/.tmp/info/9af0574f5e604c989669d08583c78be8 as hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/info/9af0574f5e604c989669d08583c78be8 2024-11-24T18:52:29,586 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/info/9af0574f5e604c989669d08583c78be8, entries=7, sequenceid=31, filesize=12.2 K 2024-11-24T18:52:34,588 INFO [FSHLog-0-hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2-prefix:f2b92657890a,33415,1732474278738 {}] 
wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42819,DS-454ffbd1-f0ee-406e-8b8b-fd819823a078,DISK], DatanodeInfoWithStorage[127.0.0.1:46307,DS-274a7b26-166f-489a-96f9-afbae2c94bf1,DISK]] 2024-11-24T18:52:34,589 WARN [FSHLog-0-hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2-prefix:f2b92657890a,33415,1732474278738 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42819,DS-454ffbd1-f0ee-406e-8b8b-fd819823a078,DISK], DatanodeInfoWithStorage[127.0.0.1:46307,DS-274a7b26-166f-489a-96f9-afbae2c94bf1,DISK]] 2024-11-24T18:52:34,589 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 6368e8824a29bcabcc8580c2132b9082 in 10101ms, sequenceid=31, compaction requested=true 2024-11-24T18:52:34,589 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6368e8824a29bcabcc8580c2132b9082: 2024-11-24T18:52:34,589 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-24T18:52:34,589 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T18:52:34,589 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/info/f632f31eb6fb4b84b9859ae9a61d7446 because midkey is the same as first or last row 2024-11-24T18:52:34,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6368e8824a29bcabcc8580c2132b9082:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T18:52:34,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T18:52:34,594 DEBUG [RS:0;f2b92657890a:33415-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T18:52:34,597 DEBUG [RS:0;f2b92657890a:33415-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T18:52:34,599 DEBUG [RS:0;f2b92657890a:33415-shortCompactions-0 {}] regionserver.HStore(1541): 6368e8824a29bcabcc8580c2132b9082/info is initiating minor compaction (all files) 2024-11-24T18:52:34,600 INFO [RS:0;f2b92657890a:33415-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 6368e8824a29bcabcc8580c2132b9082/info in TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082. 
2024-11-24T18:52:34,600 INFO [RS:0;f2b92657890a:33415-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/info/f632f31eb6fb4b84b9859ae9a61d7446, hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/info/5cf7cb1f35d0401d9ad0cc0fa2016f3d, hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/info/9af0574f5e604c989669d08583c78be8] into tmpdir=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/.tmp, totalSize=36.6 K 2024-11-24T18:52:34,601 DEBUG [RS:0;f2b92657890a:33415-shortCompactions-0 {}] compactions.Compactor(225): Compacting f632f31eb6fb4b84b9859ae9a61d7446, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732474292013 2024-11-24T18:52:34,602 DEBUG [RS:0;f2b92657890a:33415-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5cf7cb1f35d0401d9ad0cc0fa2016f3d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1732474306060 2024-11-24T18:52:34,603 DEBUG [RS:0;f2b92657890a:33415-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9af0574f5e604c989669d08583c78be8, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1732474321128 2024-11-24T18:52:34,620 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5063 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42819,DS-454ffbd1-f0ee-406e-8b8b-fd819823a078,DISK], DatanodeInfoWithStorage[127.0.0.1:46307,DS-274a7b26-166f-489a-96f9-afbae2c94bf1,DISK]] 2024-11-24T18:52:34,620 WARN [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5063 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42819,DS-454ffbd1-f0ee-406e-8b8b-fd819823a078,DISK], DatanodeInfoWithStorage[127.0.0.1:46307,DS-274a7b26-166f-489a-96f9-afbae2c94bf1,DISK]] 2024-11-24T18:52:34,620 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:34,621 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:34,621 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:34,621 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:34,621 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:34,621 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474339422 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474349553 2024-11-24T18:52:34,623 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43553:43553),(127.0.0.1/127.0.0.1:46037:46037)] 2024-11-24T18:52:34,624 DEBUG 
[regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474339422 is not closed yet, will try archiving it next time 2024-11-24T18:52:34,624 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f2b92657890a%2C33415%2C1732474278738:(num 1732474349553) roll requested 2024-11-24T18:52:34,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741843_1019 (size=438) 2024-11-24T18:52:34,624 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C33415%2C1732474278738.1732474354624 2024-11-24T18:52:34,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741843_1019 (size=438) 2024-11-24T18:52:34,625 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474312078 to hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/oldWALs/f2b92657890a%2C33415%2C1732474278738.1732474312078 2024-11-24T18:52:34,627 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474323577 to hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/oldWALs/f2b92657890a%2C33415%2C1732474278738.1732474323577 2024-11-24T18:52:34,629 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474334397 to hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/oldWALs/f2b92657890a%2C33415%2C1732474278738.1732474334397 2024-11-24T18:52:34,631 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474339422 to hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/oldWALs/f2b92657890a%2C33415%2C1732474278738.1732474339422 2024-11-24T18:52:34,637 INFO [RS:0;f2b92657890a:33415-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6368e8824a29bcabcc8580c2132b9082#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T18:52:34,638 DEBUG [RS:0;f2b92657890a:33415-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/.tmp/info/9b91f469aae64a838bacdf196f0cd0ac is 1080, key is row0001/info:/1732474292013/Put/seqid=0 2024-11-24T18:52:34,642 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:34,642 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:34,643 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:34,643 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:34,643 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:34,643 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474349553 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474354624 2024-11-24T18:52:34,645 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46037:46037),(127.0.0.1/127.0.0.1:43553:43553)] 2024-11-24T18:52:34,645 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474349553 is not closed yet, will try archiving it next time 2024-11-24T18:52:34,645 INFO [regionserver/f2b92657890a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C33415%2C1732474278738.1732474354645 2024-11-24T18:52:34,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741845_1021 (size=93) 2024-11-24T18:52:34,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741845_1021 (size=93) 2024-11-24T18:52:34,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741847_1023 (size=27710) 2024-11-24T18:52:34,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741847_1023 (size=27710) 2024-11-24T18:52:34,653 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:34,653 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:34,653 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:34,653 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:34,653 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:34,653 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474354624 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474354645 2024-11-24T18:52:34,656 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741846_1022 (size=1258) 2024-11-24T18:52:34,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741846_1022 (size=1258) 2024-11-24T18:52:34,658 DEBUG [RS:0;f2b92657890a:33415-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/.tmp/info/9b91f469aae64a838bacdf196f0cd0ac as hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/info/9b91f469aae64a838bacdf196f0cd0ac 2024-11-24T18:52:34,661 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43553:43553),(127.0.0.1/127.0.0.1:46037:46037)] 2024-11-24T18:52:34,661 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474349553 is not closed yet, will try archiving it next time 2024-11-24T18:52:34,661 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474354624 is not closed yet, will try archiving it next time 2024-11-24T18:52:34,674 INFO [RS:0;f2b92657890a:33415-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 6368e8824a29bcabcc8580c2132b9082/info of 6368e8824a29bcabcc8580c2132b9082 into 9b91f469aae64a838bacdf196f0cd0ac(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-24T18:52:34,674 DEBUG [RS:0;f2b92657890a:33415-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6368e8824a29bcabcc8580c2132b9082: 2024-11-24T18:52:34,676 INFO [RS:0;f2b92657890a:33415-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082., storeName=6368e8824a29bcabcc8580c2132b9082/info, priority=13, startTime=1732474354591; duration=0sec 2024-11-24T18:52:34,676 DEBUG [RS:0;f2b92657890a:33415-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-24T18:52:34,676 DEBUG [RS:0;f2b92657890a:33415-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T18:52:34,676 DEBUG [RS:0;f2b92657890a:33415-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/info/9b91f469aae64a838bacdf196f0cd0ac because midkey is the same as first or last row 2024-11-24T18:52:34,677 DEBUG [RS:0;f2b92657890a:33415-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-24T18:52:34,677 DEBUG [RS:0;f2b92657890a:33415-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T18:52:34,677 DEBUG [RS:0;f2b92657890a:33415-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/info/9b91f469aae64a838bacdf196f0cd0ac because midkey is the same as first or last row 2024-11-24T18:52:34,677 DEBUG [RS:0;f2b92657890a:33415-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-24T18:52:34,677 DEBUG [RS:0;f2b92657890a:33415-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T18:52:34,677 DEBUG [RS:0;f2b92657890a:33415-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/info/9b91f469aae64a838bacdf196f0cd0ac because midkey is the same as first or last row 2024-11-24T18:52:34,677 DEBUG [RS:0;f2b92657890a:33415-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T18:52:34,677 DEBUG [RS:0;f2b92657890a:33415-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6368e8824a29bcabcc8580c2132b9082:info 2024-11-24T18:52:35,047 DEBUG [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(879): hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474354624 is not closed yet, will try archiving it next time 2024-11-24T18:52:35,047 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/WALs/f2b92657890a,33415,1732474278738/f2b92657890a%2C33415%2C1732474278738.1732474349553 to hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/oldWALs/f2b92657890a%2C33415%2C1732474278738.1732474349553 2024-11-24T18:52:46,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33415 {}] regionserver.HRegion(8855): Flush requested on 6368e8824a29bcabcc8580c2132b9082 2024-11-24T18:52:46,697 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6368e8824a29bcabcc8580c2132b9082 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T18:52:46,709 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/.tmp/info/90a9b8cddd064f49a9de0005f1f28555 is 1080, key is row0022/info:/1732474354647/Put/seqid=0 2024-11-24T18:52:46,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741849_1025 (size=12509) 2024-11-24T18:52:46,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741849_1025 (size=12509) 2024-11-24T18:52:46,716 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/.tmp/info/90a9b8cddd064f49a9de0005f1f28555 2024-11-24T18:52:46,726 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/.tmp/info/90a9b8cddd064f49a9de0005f1f28555 as hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/info/90a9b8cddd064f49a9de0005f1f28555 2024-11-24T18:52:46,734 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/info/90a9b8cddd064f49a9de0005f1f28555, entries=7, sequenceid=42, filesize=12.2 K 2024-11-24T18:52:46,736 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 6368e8824a29bcabcc8580c2132b9082 in 40ms, sequenceid=42, compaction requested=false 2024-11-24T18:52:46,736 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6368e8824a29bcabcc8580c2132b9082: 2024-11-24T18:52:46,736 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-24T18:52:46,736 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T18:52:46,736 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/info/9b91f469aae64a838bacdf196f0cd0ac because midkey is the same as first or last row 2024-11-24T18:52:47,032 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T18:52:52,324 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 6368e8824a29bcabcc8580c2132b9082, had cached 0 bytes from a total of 40219 2024-11-24T18:52:54,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T18:52:54,716 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T18:52:54,717 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) 
at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T18:52:54,724 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:52:54,724 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:52:54,724 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-24T18:52:54,725 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T18:52:54,725 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1675802499, stopped=false 2024-11-24T18:52:54,725 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=f2b92657890a,44605,1732474277958 2024-11-24T18:52:54,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T18:52:54,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1016e2f888c0001, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T18:52:54,796 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T18:52:54,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:52:54,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1016e2f888c0001, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:52:54,796 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-24T18:52:54,797 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T18:52:54,797 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:52:54,797 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'f2b92657890a,33415,1732474278738' ***** 2024-11-24T18:52:54,797 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T18:52:54,797 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33415-0x1016e2f888c0001, quorum=127.0.0.1:64439, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T18:52:54,797 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T18:52:54,797 INFO [RS:0;f2b92657890a:33415 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T18:52:54,798 INFO [RS:0;f2b92657890a:33415 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T18:52:54,798 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T18:52:54,798 INFO [RS:0;f2b92657890a:33415 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T18:52:54,798 INFO [RS:0;f2b92657890a:33415 {}] regionserver.HRegionServer(3091): Received CLOSE for 6368e8824a29bcabcc8580c2132b9082 2024-11-24T18:52:54,799 INFO [RS:0;f2b92657890a:33415 {}] regionserver.HRegionServer(959): stopping server f2b92657890a,33415,1732474278738 2024-11-24T18:52:54,799 INFO [RS:0;f2b92657890a:33415 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T18:52:54,799 INFO [RS:0;f2b92657890a:33415 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;f2b92657890a:33415. 2024-11-24T18:52:54,799 DEBUG [RS:0;f2b92657890a:33415 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T18:52:54,799 DEBUG [RS:0;f2b92657890a:33415 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:52:54,799 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 6368e8824a29bcabcc8580c2132b9082, disabling compactions & flushes 2024-11-24T18:52:54,799 INFO [RS:0;f2b92657890a:33415 {}] 
regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T18:52:54,799 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082. 2024-11-24T18:52:54,799 INFO [RS:0;f2b92657890a:33415 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T18:52:54,799 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082. 2024-11-24T18:52:54,799 INFO [RS:0;f2b92657890a:33415 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T18:52:54,799 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082. after waiting 0 ms 2024-11-24T18:52:54,800 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082. 2024-11-24T18:52:54,800 INFO [RS:0;f2b92657890a:33415 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T18:52:54,800 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 6368e8824a29bcabcc8580c2132b9082 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-24T18:52:54,800 INFO [RS:0;f2b92657890a:33415 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-24T18:52:54,800 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T18:52:54,800 DEBUG [RS:0;f2b92657890a:33415 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 6368e8824a29bcabcc8580c2132b9082=TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082.} 2024-11-24T18:52:54,800 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T18:52:54,800 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T18:52:54,800 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T18:52:54,800 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T18:52:54,800 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-24T18:52:54,800 DEBUG [RS:0;f2b92657890a:33415 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 6368e8824a29bcabcc8580c2132b9082 2024-11-24T18:52:54,807 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/.tmp/info/542cc5bf8225458ba881da8dd5948727 is 1080, key is row0029/info:/1732474368701/Put/seqid=0 2024-11-24T18:52:54,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741850_1026 (size=8193) 2024-11-24T18:52:54,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741850_1026 (size=8193) 2024-11-24T18:52:54,820 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/.tmp/info/542cc5bf8225458ba881da8dd5948727 2024-11-24T18:52:54,823 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/hbase/meta/1588230740/.tmp/info/d9a95c4e98774697be95827ad3aedcbd is 195, key is TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082./info:regioninfo/1732474282349/Put/seqid=0 2024-11-24T18:52:54,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741851_1027 (size=7016) 2024-11-24T18:52:54,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741851_1027 (size=7016) 2024-11-24T18:52:54,843 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/.tmp/info/542cc5bf8225458ba881da8dd5948727 as hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/info/542cc5bf8225458ba881da8dd5948727 2024-11-24T18:52:54,854 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/info/542cc5bf8225458ba881da8dd5948727, entries=3, sequenceid=48, filesize=8.0 K 2024-11-24T18:52:54,855 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 6368e8824a29bcabcc8580c2132b9082 in 55ms, sequenceid=48, compaction requested=true 2024-11-24T18:52:54,856 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/info/f632f31eb6fb4b84b9859ae9a61d7446, 
hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/info/5cf7cb1f35d0401d9ad0cc0fa2016f3d, hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/info/9af0574f5e604c989669d08583c78be8] to archive 2024-11-24T18:52:54,860 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-24T18:52:54,864 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/info/f632f31eb6fb4b84b9859ae9a61d7446 to hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/archive/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/info/f632f31eb6fb4b84b9859ae9a61d7446 2024-11-24T18:52:54,867 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/info/5cf7cb1f35d0401d9ad0cc0fa2016f3d to hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/archive/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/info/5cf7cb1f35d0401d9ad0cc0fa2016f3d 2024-11-24T18:52:54,870 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/info/9af0574f5e604c989669d08583c78be8 to hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/archive/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/info/9af0574f5e604c989669d08583c78be8 2024-11-24T18:52:54,884 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=f2b92657890a:44605 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-24T18:52:54,890 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [f632f31eb6fb4b84b9859ae9a61d7446=12509, 5cf7cb1f35d0401d9ad0cc0fa2016f3d=12509, 9af0574f5e604c989669d08583c78be8=12509] 2024-11-24T18:52:54,900 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/default/TestLogRolling-testSlowSyncLogRolling/6368e8824a29bcabcc8580c2132b9082/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-24T18:52:54,902 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082. 2024-11-24T18:52:54,902 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 6368e8824a29bcabcc8580c2132b9082: Waiting for close lock at 1732474374799Running coprocessor pre-close hooks at 1732474374799Disabling compacts and flushes for region at 1732474374799Disabling writes for close at 1732474374799Obtaining lock to block concurrent updates at 1732474374800 (+1 ms)Preparing flush snapshotting stores in 6368e8824a29bcabcc8580c2132b9082 at 1732474374800Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1732474374800Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082. at 1732474374801 (+1 ms)Flushing 6368e8824a29bcabcc8580c2132b9082/info: creating writer at 1732474374801Flushing 6368e8824a29bcabcc8580c2132b9082/info: appending metadata at 1732474374806 (+5 ms)Flushing 6368e8824a29bcabcc8580c2132b9082/info: closing flushed file at 1732474374806Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b42c778: reopening flushed file at 1732474374842 (+36 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 6368e8824a29bcabcc8580c2132b9082 in 55ms, sequenceid=48, compaction requested=true at 1732474374855 (+13 ms)Writing region close event to WAL at 1732474374891 (+36 ms)Running coprocessor post-close hooks at 1732474374901 (+10 ms)Closed at 1732474374902 (+1 ms) 2024-11-24T18:52:54,903 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1732474281891.6368e8824a29bcabcc8580c2132b9082. 
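At this point the region close has flushed its remaining memstore into a new HFile under .tmp, committed it into the info family, and archived the already-compacted store files; only the archival report to the master fails, because the master-facing RPC client was stopped first. The same flush path can also be exercised from a client. A hedged sketch using only the public Admin API (the table name is the one from this test, reused purely as an example):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ExplicitFlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Forces the same memstore -> HFile flush that the region close performs above.
      admin.flush(table);
      // A major compaction rewrites the flushed files; the superseded files are
      // what HFileArchiver later moves under archive/, as in the records above.
      admin.majorCompact(table);
    }
  }
}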
2024-11-24T18:52:55,001 DEBUG [RS:0;f2b92657890a:33415 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-24T18:52:55,201 DEBUG [RS:0;f2b92657890a:33415 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-24T18:52:55,237 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/hbase/meta/1588230740/.tmp/info/d9a95c4e98774697be95827ad3aedcbd 2024-11-24T18:52:55,270 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/hbase/meta/1588230740/.tmp/ns/b475d88bbcb74a7ba42980ae9ab5205f is 43, key is default/ns:d/1732474281629/Put/seqid=0 2024-11-24T18:52:55,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741852_1028 (size=5153) 2024-11-24T18:52:55,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741852_1028 (size=5153) 2024-11-24T18:52:55,279 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/hbase/meta/1588230740/.tmp/ns/b475d88bbcb74a7ba42980ae9ab5205f 2024-11-24T18:52:55,304 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/hbase/meta/1588230740/.tmp/table/5e62bf0eedc24be68e7faede886ad63f is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1732474282366/Put/seqid=0 2024-11-24T18:52:55,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741853_1029 (size=5396) 2024-11-24T18:52:55,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741853_1029 (size=5396) 2024-11-24T18:52:55,315 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/hbase/meta/1588230740/.tmp/table/5e62bf0eedc24be68e7faede886ad63f 2024-11-24T18:52:55,324 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/hbase/meta/1588230740/.tmp/info/d9a95c4e98774697be95827ad3aedcbd as hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/hbase/meta/1588230740/info/d9a95c4e98774697be95827ad3aedcbd 2024-11-24T18:52:55,333 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/hbase/meta/1588230740/info/d9a95c4e98774697be95827ad3aedcbd, entries=10, sequenceid=11, filesize=6.9 K 2024-11-24T18:52:55,334 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/hbase/meta/1588230740/.tmp/ns/b475d88bbcb74a7ba42980ae9ab5205f as hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/hbase/meta/1588230740/ns/b475d88bbcb74a7ba42980ae9ab5205f 2024-11-24T18:52:55,343 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/hbase/meta/1588230740/ns/b475d88bbcb74a7ba42980ae9ab5205f, entries=2, sequenceid=11, filesize=5.0 K 2024-11-24T18:52:55,345 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/hbase/meta/1588230740/.tmp/table/5e62bf0eedc24be68e7faede886ad63f as hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/hbase/meta/1588230740/table/5e62bf0eedc24be68e7faede886ad63f 2024-11-24T18:52:55,355 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/hbase/meta/1588230740/table/5e62bf0eedc24be68e7faede886ad63f, entries=2, sequenceid=11, filesize=5.3 K 2024-11-24T18:52:55,357 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 557ms, sequenceid=11, compaction requested=false 2024-11-24T18:52:55,369 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-24T18:52:55,370 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T18:52:55,370 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T18:52:55,370 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732474374800Running coprocessor pre-close hooks at 1732474374800Disabling compacts and flushes for region at 1732474374800Disabling writes for close at 1732474374800Obtaining lock to block concurrent updates at 1732474374800Preparing flush snapshotting stores in 1588230740 at 1732474374800Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1732474374801 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732474374802 (+1 ms)Flushing 1588230740/info: creating writer at 
1732474374802Flushing 1588230740/info: appending metadata at 1732474374822 (+20 ms)Flushing 1588230740/info: closing flushed file at 1732474374822Flushing 1588230740/ns: creating writer at 1732474375247 (+425 ms)Flushing 1588230740/ns: appending metadata at 1732474375269 (+22 ms)Flushing 1588230740/ns: closing flushed file at 1732474375269Flushing 1588230740/table: creating writer at 1732474375288 (+19 ms)Flushing 1588230740/table: appending metadata at 1732474375303 (+15 ms)Flushing 1588230740/table: closing flushed file at 1732474375303Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@300dbcc2: reopening flushed file at 1732474375323 (+20 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4e8f1fc2: reopening flushed file at 1732474375333 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4853c09: reopening flushed file at 1732474375344 (+11 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 557ms, sequenceid=11, compaction requested=false at 1732474375357 (+13 ms)Writing region close event to WAL at 1732474375364 (+7 ms)Running coprocessor post-close hooks at 1732474375370 (+6 ms)Closed at 1732474375370 2024-11-24T18:52:55,371 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T18:52:55,401 INFO [RS:0;f2b92657890a:33415 {}] regionserver.HRegionServer(976): stopping server f2b92657890a,33415,1732474278738; all regions closed. 2024-11-24T18:52:55,403 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:55,403 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:55,403 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:55,403 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:55,404 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:55,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741834_1010 (size=3066) 2024-11-24T18:52:55,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741834_1010 (size=3066) 2024-11-24T18:52:55,775 INFO [regionserver/f2b92657890a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-24T18:52:55,775 INFO [regionserver/f2b92657890a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-24T18:52:55,811 DEBUG [RS:0;f2b92657890a:33415 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/oldWALs 2024-11-24T18:52:55,811 INFO [RS:0;f2b92657890a:33415 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f2b92657890a%2C33415%2C1732474278738.meta:.meta(num 1732474281307) 2024-11-24T18:52:55,814 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:55,814 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:55,814 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:55,814 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:55,814 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:55,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741848_1024 (size=12695) 
2024-11-24T18:52:55,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741848_1024 (size=12695) 2024-11-24T18:52:55,822 DEBUG [RS:0;f2b92657890a:33415 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/oldWALs 2024-11-24T18:52:55,822 INFO [RS:0;f2b92657890a:33415 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f2b92657890a%2C33415%2C1732474278738:(num 1732474354645) 2024-11-24T18:52:55,822 DEBUG [RS:0;f2b92657890a:33415 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:52:55,822 INFO [RS:0;f2b92657890a:33415 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T18:52:55,822 INFO [RS:0;f2b92657890a:33415 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T18:52:55,822 INFO [RS:0;f2b92657890a:33415 {}] hbase.ChoreService(370): Chore service for: regionserver/f2b92657890a:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-24T18:52:55,823 INFO [RS:0;f2b92657890a:33415 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T18:52:55,823 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T18:52:55,823 INFO [RS:0;f2b92657890a:33415 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33415 2024-11-24T18:52:55,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T18:52:55,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1016e2f888c0001, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/f2b92657890a,33415,1732474278738 2024-11-24T18:52:55,878 INFO [RS:0;f2b92657890a:33415 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T18:52:55,879 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [f2b92657890a,33415,1732474278738] 2024-11-24T18:52:55,901 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/f2b92657890a,33415,1732474278738 already deleted, retry=false 2024-11-24T18:52:55,901 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; f2b92657890a,33415,1732474278738 expired; onlineServers=0 2024-11-24T18:52:55,901 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'f2b92657890a,44605,1732474277958' ***** 2024-11-24T18:52:55,902 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T18:52:55,902 INFO [M:0;f2b92657890a:44605 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T18:52:55,902 INFO [M:0;f2b92657890a:44605 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T18:52:55,902 DEBUG [M:0;f2b92657890a:44605 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T18:52:55,902 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
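The WAL handling above (sync runners interrupted, the remaining WAL files moved to oldWALs, the FSHLog instances closed) is the shutdown counterpart of a normal log roll. For reference, a roll can also be requested explicitly through the Admin API; a minimal sketch, assuming a running cluster reachable from the local configuration (the iteration over live servers is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WalRollSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask each live region server to roll its WAL; the closed WAL files then
      // become candidates for the oldWALs directory seen in the records above.
      for (ServerName rs : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
        admin.rollWALWriter(rs);
      }
    }
  }
}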
2024-11-24T18:52:55,902 DEBUG [M:0;f2b92657890a:44605 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T18:52:55,902 DEBUG [master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.large.0-1732474280481 {}] cleaner.HFileCleaner(306): Exit Thread[master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.large.0-1732474280481,5,FailOnTimeoutGroup] 2024-11-24T18:52:55,902 INFO [M:0;f2b92657890a:44605 {}] hbase.ChoreService(370): Chore service for: master/f2b92657890a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T18:52:55,903 INFO [M:0;f2b92657890a:44605 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T18:52:55,903 DEBUG [M:0;f2b92657890a:44605 {}] master.HMaster(1795): Stopping service threads 2024-11-24T18:52:55,903 INFO [M:0;f2b92657890a:44605 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T18:52:55,903 INFO [M:0;f2b92657890a:44605 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T18:52:55,903 INFO [M:0;f2b92657890a:44605 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T18:52:55,904 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T18:52:55,904 DEBUG [master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.small.0-1732474280482 {}] cleaner.HFileCleaner(306): Exit Thread[master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.small.0-1732474280482,5,FailOnTimeoutGroup] 2024-11-24T18:52:55,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T18:52:55,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:52:55,912 DEBUG [M:0;f2b92657890a:44605 {}] zookeeper.ZKUtil(347): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T18:52:55,912 WARN [M:0;f2b92657890a:44605 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T18:52:55,913 INFO [M:0;f2b92657890a:44605 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/.lastflushedseqids 2024-11-24T18:52:55,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741854_1030 (size=130) 2024-11-24T18:52:55,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741854_1030 (size=130) 2024-11-24T18:52:55,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1016e2f888c0001, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T18:52:55,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1016e2f888c0001, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T18:52:55,991 
INFO [RS:0;f2b92657890a:33415 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T18:52:55,992 INFO [RS:0;f2b92657890a:33415 {}] regionserver.HRegionServer(1031): Exiting; stopping=f2b92657890a,33415,1732474278738; zookeeper connection closed. 2024-11-24T18:52:55,992 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@35b5b7f5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@35b5b7f5 2024-11-24T18:52:55,993 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T18:52:56,330 INFO [M:0;f2b92657890a:44605 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T18:52:56,330 INFO [M:0;f2b92657890a:44605 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T18:52:56,330 DEBUG [M:0;f2b92657890a:44605 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T18:52:56,330 INFO [M:0;f2b92657890a:44605 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:52:56,330 DEBUG [M:0;f2b92657890a:44605 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:52:56,330 DEBUG [M:0;f2b92657890a:44605 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T18:52:56,330 DEBUG [M:0;f2b92657890a:44605 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T18:52:56,331 INFO [M:0;f2b92657890a:44605 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-24T18:52:56,352 DEBUG [M:0;f2b92657890a:44605 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d76352e54aba48d8a7be54a172c5b3ff is 82, key is hbase:meta,,1/info:regioninfo/1732474281376/Put/seqid=0 2024-11-24T18:52:56,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741855_1031 (size=5672) 2024-11-24T18:52:56,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741855_1031 (size=5672) 2024-11-24T18:52:56,367 INFO [M:0;f2b92657890a:44605 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d76352e54aba48d8a7be54a172c5b3ff 2024-11-24T18:52:56,399 DEBUG [M:0;f2b92657890a:44605 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/be395e98990e4de986a54d9e0af80579 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732474282375/Put/seqid=0 2024-11-24T18:52:56,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741856_1032 (size=6247) 2024-11-24T18:52:56,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741856_1032 (size=6247) 2024-11-24T18:52:56,413 INFO [M:0;f2b92657890a:44605 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/be395e98990e4de986a54d9e0af80579 2024-11-24T18:52:56,421 INFO [M:0;f2b92657890a:44605 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for be395e98990e4de986a54d9e0af80579 2024-11-24T18:52:56,445 DEBUG [M:0;f2b92657890a:44605 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b2759b350914432f9910da0c9f1acf59 is 69, key is f2b92657890a,33415,1732474278738/rs:state/1732474280578/Put/seqid=0 2024-11-24T18:52:56,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741857_1033 (size=5156) 2024-11-24T18:52:56,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741857_1033 (size=5156) 2024-11-24T18:52:56,459 INFO [M:0;f2b92657890a:44605 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), 
to=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b2759b350914432f9910da0c9f1acf59 2024-11-24T18:52:56,493 DEBUG [M:0;f2b92657890a:44605 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/21cac40fe898486ab490b9302ca3654b is 52, key is load_balancer_on/state:d/1732474281869/Put/seqid=0 2024-11-24T18:52:56,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741858_1034 (size=5056) 2024-11-24T18:52:56,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741858_1034 (size=5056) 2024-11-24T18:52:56,504 INFO [M:0;f2b92657890a:44605 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/21cac40fe898486ab490b9302ca3654b 2024-11-24T18:52:56,512 DEBUG [M:0;f2b92657890a:44605 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d76352e54aba48d8a7be54a172c5b3ff as hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d76352e54aba48d8a7be54a172c5b3ff 2024-11-24T18:52:56,521 INFO [M:0;f2b92657890a:44605 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d76352e54aba48d8a7be54a172c5b3ff, entries=8, sequenceid=59, filesize=5.5 K 2024-11-24T18:52:56,522 DEBUG [M:0;f2b92657890a:44605 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/be395e98990e4de986a54d9e0af80579 as hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/be395e98990e4de986a54d9e0af80579 2024-11-24T18:52:56,531 INFO [M:0;f2b92657890a:44605 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for be395e98990e4de986a54d9e0af80579 2024-11-24T18:52:56,531 INFO [M:0;f2b92657890a:44605 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/be395e98990e4de986a54d9e0af80579, entries=6, sequenceid=59, filesize=6.1 K 2024-11-24T18:52:56,533 DEBUG [M:0;f2b92657890a:44605 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b2759b350914432f9910da0c9f1acf59 as hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b2759b350914432f9910da0c9f1acf59 
2024-11-24T18:52:56,548 INFO [M:0;f2b92657890a:44605 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b2759b350914432f9910da0c9f1acf59, entries=1, sequenceid=59, filesize=5.0 K 2024-11-24T18:52:56,551 DEBUG [M:0;f2b92657890a:44605 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/21cac40fe898486ab490b9302ca3654b as hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/21cac40fe898486ab490b9302ca3654b 2024-11-24T18:52:56,571 INFO [M:0;f2b92657890a:44605 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/21cac40fe898486ab490b9302ca3654b, entries=1, sequenceid=59, filesize=4.9 K 2024-11-24T18:52:56,573 INFO [M:0;f2b92657890a:44605 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 243ms, sequenceid=59, compaction requested=false 2024-11-24T18:52:56,579 INFO [M:0;f2b92657890a:44605 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:52:56,580 DEBUG [M:0;f2b92657890a:44605 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732474376330Disabling compacts and flushes for region at 1732474376330Disabling writes for close at 1732474376330Obtaining lock to block concurrent updates at 1732474376331 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732474376331Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1732474376331Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732474376332 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732474376332Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732474376351 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732474376351Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732474376375 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732474376398 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732474376398Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732474376421 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732474376444 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732474376444Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732474376471 (+27 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732474376492 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732474376492Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@426b3874: reopening flushed file at 1732474376511 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5108e766: reopening flushed file at 1732474376521 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4deb70af: reopening flushed file at 1732474376531 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3096cdb5: reopening flushed file at 1732474376549 (+18 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 243ms, sequenceid=59, compaction requested=false at 1732474376573 (+24 ms)Writing region close event to WAL at 1732474376579 (+6 ms)Closed at 1732474376579 2024-11-24T18:52:56,584 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:56,585 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:56,585 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:56,585 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:56,585 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:52:56,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741830_1006 (size=27973) 2024-11-24T18:52:56,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42819 is added to blk_1073741830_1006 (size=27973) 2024-11-24T18:52:56,592 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T18:52:56,592 INFO [M:0;f2b92657890a:44605 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-24T18:52:56,592 INFO [M:0;f2b92657890a:44605 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44605 2024-11-24T18:52:56,592 INFO [M:0;f2b92657890a:44605 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T18:52:56,698 INFO [regionserver/f2b92657890a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T18:52:56,746 INFO [M:0;f2b92657890a:44605 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T18:52:56,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T18:52:56,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44605-0x1016e2f888c0000, quorum=127.0.0.1:64439, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T18:52:56,751 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:52:56,754 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T18:52:56,754 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T18:52:56,755 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T18:52:56,755 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/hadoop.log.dir/,STOPPED} 2024-11-24T18:52:56,759 WARN [BP-1886669259-172.17.0.2-1732474272711 heartbeating to localhost/127.0.0.1:40437 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T18:52:56,759 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T18:52:56,759 WARN [BP-1886669259-172.17.0.2-1732474272711 heartbeating to localhost/127.0.0.1:40437 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1886669259-172.17.0.2-1732474272711 (Datanode Uuid ad9e1b75-2c4c-4d0a-ae2c-b40b27af35c5) service to localhost/127.0.0.1:40437 2024-11-24T18:52:56,759 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T18:52:56,760 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/cluster_8fd6d39d-f1e7-d111-0b8b-0e415c4d9e0a/data/data3/current/BP-1886669259-172.17.0.2-1732474272711 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:52:56,760 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/cluster_8fd6d39d-f1e7-d111-0b8b-0e415c4d9e0a/data/data4/current/BP-1886669259-172.17.0.2-1732474272711 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:52:56,761 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T18:52:56,769 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:52:56,769 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T18:52:56,769 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T18:52:56,769 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T18:52:56,770 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/hadoop.log.dir/,STOPPED} 2024-11-24T18:52:56,772 WARN [BP-1886669259-172.17.0.2-1732474272711 heartbeating to localhost/127.0.0.1:40437 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T18:52:56,772 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T18:52:56,772 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T18:52:56,772 WARN [BP-1886669259-172.17.0.2-1732474272711 heartbeating to localhost/127.0.0.1:40437 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1886669259-172.17.0.2-1732474272711 (Datanode Uuid 19d4d744-57ac-4c09-864c-5dfbf9012692) service to localhost/127.0.0.1:40437 2024-11-24T18:52:56,773 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/cluster_8fd6d39d-f1e7-d111-0b8b-0e415c4d9e0a/data/data1/current/BP-1886669259-172.17.0.2-1732474272711 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:52:56,773 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/cluster_8fd6d39d-f1e7-d111-0b8b-0e415c4d9e0a/data/data2/current/BP-1886669259-172.17.0.2-1732474272711 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:52:56,773 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T18:52:56,782 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T18:52:56,783 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T18:52:56,783 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T18:52:56,783 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T18:52:56,783 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/hadoop.log.dir/,STOPPED} 2024-11-24T18:52:56,792 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T18:52:56,829 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T18:52:56,839 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=81 (was 12) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40437 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:40437 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40437 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40437 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/f2b92657890a:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@c6b7e43 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40437 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: master/f2b92657890a:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40437 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40437 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: regionserver/f2b92657890a:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40437 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=155 (was 310), ProcessCount=11 (was 11), AvailableMemoryMB=9914 (was 9487) - AvailableMemoryMB LEAK? 
- 2024-11-24T18:52:56,847 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=82, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=155, ProcessCount=11, AvailableMemoryMB=9913 2024-11-24T18:52:56,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T18:52:56,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/hadoop.log.dir so I do NOT create it in target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2 2024-11-24T18:52:56,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ded2da6-1b14-153c-0ff9-970bf5ed7568/hadoop.tmp.dir so I do NOT create it in target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2 2024-11-24T18:52:56,848 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/cluster_7fffde26-21e2-9b66-c37c-f0466c01c8ba, deleteOnExit=true 2024-11-24T18:52:56,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T18:52:56,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/test.cache.data in system properties and HBase conf 2024-11-24T18:52:56,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T18:52:56,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/hadoop.log.dir in system properties and HBase conf 2024-11-24T18:52:56,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T18:52:56,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T18:52:56,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T18:52:56,849 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-24T18:52:56,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T18:52:56,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T18:52:56,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T18:52:56,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T18:52:56,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T18:52:56,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T18:52:56,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T18:52:56,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T18:52:56,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T18:52:56,851 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/nfs.dump.dir in system properties and HBase conf 2024-11-24T18:52:56,851 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/java.io.tmpdir in system properties and HBase conf 2024-11-24T18:52:56,851 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T18:52:56,851 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T18:52:56,851 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T18:52:56,871 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T18:52:57,420 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:52:57,428 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T18:52:57,434 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T18:52:57,434 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T18:52:57,434 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T18:52:57,435 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:52:57,435 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a15ed6a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/hadoop.log.dir/,AVAILABLE} 2024-11-24T18:52:57,436 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2152d149{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T18:52:57,535 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3fa5684d{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/java.io.tmpdir/jetty-localhost-36487-hadoop-hdfs-3_4_1-tests_jar-_-any-18098171497138039118/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T18:52:57,535 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7bd218c7{HTTP/1.1, (http/1.1)}{localhost:36487} 2024-11-24T18:52:57,535 INFO [Time-limited test {}] server.Server(415): Started @106888ms 2024-11-24T18:52:57,548 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T18:52:57,813 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:52:57,817 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T18:52:57,824 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T18:52:57,825 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T18:52:57,825 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T18:52:57,825 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e7873b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/hadoop.log.dir/,AVAILABLE} 2024-11-24T18:52:57,826 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21e00560{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T18:52:57,923 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@54b1fb62{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/java.io.tmpdir/jetty-localhost-43453-hadoop-hdfs-3_4_1-tests_jar-_-any-17813855644722530054/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:52:57,923 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@38966cd1{HTTP/1.1, (http/1.1)}{localhost:43453} 2024-11-24T18:52:57,923 INFO [Time-limited test {}] server.Server(415): Started @107276ms 2024-11-24T18:52:57,925 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T18:52:57,979 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:52:57,983 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T18:52:57,985 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T18:52:57,985 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T18:52:57,985 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T18:52:57,987 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@573af0f2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/hadoop.log.dir/,AVAILABLE} 2024-11-24T18:52:57,988 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@17d00685{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T18:52:58,089 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@668848ff{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/java.io.tmpdir/jetty-localhost-38429-hadoop-hdfs-3_4_1-tests_jar-_-any-16936947998596998634/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:52:58,089 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1dca50a{HTTP/1.1, (http/1.1)}{localhost:38429} 2024-11-24T18:52:58,089 INFO [Time-limited test {}] server.Server(415): Started @107441ms 2024-11-24T18:52:58,091 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-24T18:52:58,327 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T18:52:58,328 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T18:52:58,329 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-24T18:52:58,330 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-24T18:52:59,091 WARN [Thread-452 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/cluster_7fffde26-21e2-9b66-c37c-f0466c01c8ba/data/data1/current/BP-1416387336-172.17.0.2-1732474376887/current, will proceed with Du for space computation calculation, 2024-11-24T18:52:59,091 WARN [Thread-453 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/cluster_7fffde26-21e2-9b66-c37c-f0466c01c8ba/data/data2/current/BP-1416387336-172.17.0.2-1732474376887/current, will proceed with Du for space computation calculation, 2024-11-24T18:52:59,118 WARN [Thread-416 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T18:52:59,121 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5d101167059ef69c with lease ID 0x834ae8c62b48f517: Processing first storage report for DS-2e9b31bd-b8a7-41c8-a153-4701b865d971 from datanode DatanodeRegistration(127.0.0.1:46539, datanodeUuid=90a21d30-efc4-4c54-85aa-c7be11f6cb62, infoPort=44969, infoSecurePort=0, ipcPort=41787, storageInfo=lv=-57;cid=testClusterID;nsid=735831748;c=1732474376887) 2024-11-24T18:52:59,121 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5d101167059ef69c with lease ID 0x834ae8c62b48f517: from storage DS-2e9b31bd-b8a7-41c8-a153-4701b865d971 node DatanodeRegistration(127.0.0.1:46539, datanodeUuid=90a21d30-efc4-4c54-85aa-c7be11f6cb62, infoPort=44969, infoSecurePort=0, ipcPort=41787, storageInfo=lv=-57;cid=testClusterID;nsid=735831748;c=1732474376887), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T18:52:59,121 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5d101167059ef69c with lease ID 0x834ae8c62b48f517: Processing first storage report for DS-c8126f7e-b356-4bf3-b1af-09b120b4d2be from datanode DatanodeRegistration(127.0.0.1:46539, datanodeUuid=90a21d30-efc4-4c54-85aa-c7be11f6cb62, infoPort=44969, infoSecurePort=0, ipcPort=41787, storageInfo=lv=-57;cid=testClusterID;nsid=735831748;c=1732474376887) 2024-11-24T18:52:59,121 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5d101167059ef69c with lease ID 0x834ae8c62b48f517: from storage DS-c8126f7e-b356-4bf3-b1af-09b120b4d2be node DatanodeRegistration(127.0.0.1:46539, datanodeUuid=90a21d30-efc4-4c54-85aa-c7be11f6cb62, infoPort=44969, infoSecurePort=0, ipcPort=41787, storageInfo=lv=-57;cid=testClusterID;nsid=735831748;c=1732474376887), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:52:59,242 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/cluster_7fffde26-21e2-9b66-c37c-f0466c01c8ba/data/data3/current/BP-1416387336-172.17.0.2-1732474376887/current, will proceed with Du for space computation calculation, 2024-11-24T18:52:59,243 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/cluster_7fffde26-21e2-9b66-c37c-f0466c01c8ba/data/data4/current/BP-1416387336-172.17.0.2-1732474376887/current, will proceed with Du for space computation calculation, 2024-11-24T18:52:59,265 WARN [Thread-439 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T18:52:59,268 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdec4cb12baf124ac with lease ID 0x834ae8c62b48f518: Processing first storage report for DS-8839b053-236a-49bc-9b02-8472d9327460 from datanode DatanodeRegistration(127.0.0.1:41537, datanodeUuid=193933c0-9521-4621-ab64-d339023ba983, infoPort=37027, infoSecurePort=0, ipcPort=45795, storageInfo=lv=-57;cid=testClusterID;nsid=735831748;c=1732474376887) 2024-11-24T18:52:59,268 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdec4cb12baf124ac with lease ID 0x834ae8c62b48f518: from storage DS-8839b053-236a-49bc-9b02-8472d9327460 node DatanodeRegistration(127.0.0.1:41537, datanodeUuid=193933c0-9521-4621-ab64-d339023ba983, infoPort=37027, infoSecurePort=0, ipcPort=45795, storageInfo=lv=-57;cid=testClusterID;nsid=735831748;c=1732474376887), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T18:52:59,268 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdec4cb12baf124ac with lease ID 0x834ae8c62b48f518: Processing first storage report for DS-61eb360a-aeff-45f2-aff3-da84ce5b8e82 from datanode DatanodeRegistration(127.0.0.1:41537, datanodeUuid=193933c0-9521-4621-ab64-d339023ba983, infoPort=37027, infoSecurePort=0, ipcPort=45795, storageInfo=lv=-57;cid=testClusterID;nsid=735831748;c=1732474376887) 2024-11-24T18:52:59,268 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdec4cb12baf124ac with lease ID 0x834ae8c62b48f518: from storage DS-61eb360a-aeff-45f2-aff3-da84ce5b8e82 node DatanodeRegistration(127.0.0.1:41537, datanodeUuid=193933c0-9521-4621-ab64-d339023ba983, infoPort=37027, infoSecurePort=0, ipcPort=45795, storageInfo=lv=-57;cid=testClusterID;nsid=735831748;c=1732474376887), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:52:59,335 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2 2024-11-24T18:52:59,338 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/cluster_7fffde26-21e2-9b66-c37c-f0466c01c8ba/zookeeper_0, clientPort=49562, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/cluster_7fffde26-21e2-9b66-c37c-f0466c01c8ba/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/cluster_7fffde26-21e2-9b66-c37c-f0466c01c8ba/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T18:52:59,339 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49562 2024-11-24T18:52:59,339 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:52:59,342 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:52:59,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41537 is added to blk_1073741825_1001 (size=7) 2024-11-24T18:52:59,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46539 is added to blk_1073741825_1001 (size=7) 2024-11-24T18:52:59,355 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424 with version=8 2024-11-24T18:52:59,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/hbase-staging 2024-11-24T18:52:59,357 INFO [Time-limited test {}] client.ConnectionUtils(128): master/f2b92657890a:0 server-side Connection retries=45 2024-11-24T18:52:59,357 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:52:59,358 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T18:52:59,358 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T18:52:59,358 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:52:59,358 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T18:52:59,358 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T18:52:59,358 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T18:52:59,359 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40329 2024-11-24T18:52:59,360 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40329 connecting to ZooKeeper ensemble=127.0.0.1:49562 2024-11-24T18:52:59,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:403290x0, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T18:52:59,472 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40329-0x1016e3117f40000 connected 2024-11-24T18:52:59,903 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:52:59,908 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:53:00,015 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:53:00,020 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:53:00,025 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T18:53:00,025 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424, hbase.cluster.distributed=false 2024-11-24T18:53:00,027 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T18:53:00,027 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40329 2024-11-24T18:53:00,027 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40329 2024-11-24T18:53:00,028 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40329 2024-11-24T18:53:00,028 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40329 2024-11-24T18:53:00,028 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40329 2024-11-24T18:53:00,045 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/f2b92657890a:0 server-side Connection retries=45 2024-11-24T18:53:00,045 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:53:00,045 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T18:53:00,045 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T18:53:00,045 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:53:00,045 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T18:53:00,045 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T18:53:00,046 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T18:53:00,046 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46319 2024-11-24T18:53:00,048 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46319 connecting to ZooKeeper ensemble=127.0.0.1:49562 2024-11-24T18:53:00,048 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:53:00,051 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:53:00,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:463190x0, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T18:53:00,150 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46319-0x1016e3117f40001 connected 2024-11-24T18:53:00,151 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46319-0x1016e3117f40001, quorum=127.0.0.1:49562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T18:53:00,151 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T18:53:00,152 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T18:53:00,153 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46319-0x1016e3117f40001, quorum=127.0.0.1:49562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T18:53:00,155 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46319-0x1016e3117f40001, quorum=127.0.0.1:49562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T18:53:00,156 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46319 2024-11-24T18:53:00,156 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46319 2024-11-24T18:53:00,157 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46319 2024-11-24T18:53:00,158 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46319 2024-11-24T18:53:00,158 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46319 2024-11-24T18:53:00,177 DEBUG [M:0;f2b92657890a:40329 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;f2b92657890a:40329 2024-11-24T18:53:00,177 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/f2b92657890a,40329,1732474379357 2024-11-24T18:53:00,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46319-0x1016e3117f40001, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
2024-11-24T18:53:00,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T18:53:00,270 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/f2b92657890a,40329,1732474379357 2024-11-24T18:53:00,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46319-0x1016e3117f40001, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T18:53:00,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:00,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46319-0x1016e3117f40001, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:00,418 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T18:53:00,419 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/f2b92657890a,40329,1732474379357 from backup master directory 2024-11-24T18:53:00,424 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T18:53:00,426 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:53:00,441 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:53:00,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/f2b92657890a,40329,1732474379357 2024-11-24T18:53:00,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46319-0x1016e3117f40001, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T18:53:00,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T18:53:00,499 WARN [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-24T18:53:00,500 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=f2b92657890a,40329,1732474379357 2024-11-24T18:53:00,509 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/hbase.id] with ID: fb304e74-6c5b-4d0b-9658-fa1a93502d99 2024-11-24T18:53:00,509 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/.tmp/hbase.id 2024-11-24T18:53:00,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46539 is added to blk_1073741826_1002 (size=42) 2024-11-24T18:53:00,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41537 is added to blk_1073741826_1002 (size=42) 2024-11-24T18:53:00,517 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/.tmp/hbase.id]:[hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/hbase.id] 2024-11-24T18:53:00,532 INFO [master/f2b92657890a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:53:00,532 INFO [master/f2b92657890a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T18:53:00,533 INFO [master/f2b92657890a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-24T18:53:00,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46319-0x1016e3117f40001, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:00,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:00,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41537 is added to blk_1073741827_1003 (size=196) 2024-11-24T18:53:00,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46539 is added to blk_1073741827_1003 (size=196) 2024-11-24T18:53:00,604 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T18:53:00,605 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T18:53:00,605 INFO [master/f2b92657890a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T18:53:00,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41537 is added to blk_1073741828_1004 (size=1189) 2024-11-24T18:53:00,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46539 is added to blk_1073741828_1004 (size=1189) 2024-11-24T18:53:00,616 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/MasterData/data/master/store 2024-11-24T18:53:00,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41537 is added to blk_1073741829_1005 (size=34) 2024-11-24T18:53:00,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46539 is added to blk_1073741829_1005 (size=34) 2024-11-24T18:53:00,625 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:53:00,625 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T18:53:00,625 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:53:00,625 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:53:00,625 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T18:53:00,625 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:53:00,625 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T18:53:00,626 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732474380625Disabling compacts and flushes for region at 1732474380625Disabling writes for close at 1732474380625Writing region close event to WAL at 1732474380625Closed at 1732474380625 2024-11-24T18:53:00,627 WARN [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/MasterData/data/master/store/.initializing 2024-11-24T18:53:00,628 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/MasterData/WALs/f2b92657890a,40329,1732474379357 2024-11-24T18:53:00,632 INFO [master/f2b92657890a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f2b92657890a%2C40329%2C1732474379357, suffix=, logDir=hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/MasterData/WALs/f2b92657890a,40329,1732474379357, archiveDir=hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/MasterData/oldWALs, maxLogs=10 2024-11-24T18:53:00,632 INFO [master/f2b92657890a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C40329%2C1732474379357.1732474380632 2024-11-24T18:53:00,639 INFO [master/f2b92657890a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/MasterData/WALs/f2b92657890a,40329,1732474379357/f2b92657890a%2C40329%2C1732474379357.1732474380632 2024-11-24T18:53:00,642 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37027:37027),(127.0.0.1/127.0.0.1:44969:44969)] 2024-11-24T18:53:00,649 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T18:53:00,649 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:53:00,650 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:00,650 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:00,652 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:00,654 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T18:53:00,654 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:00,655 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:53:00,655 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:00,656 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T18:53:00,657 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:00,657 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T18:53:00,657 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:00,660 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T18:53:00,660 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:00,661 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T18:53:00,661 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:00,663 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T18:53:00,663 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:00,663 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T18:53:00,664 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:00,665 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:00,665 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:00,667 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:00,667 DEBUG [master/f2b92657890a:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:00,667 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T18:53:00,669 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:00,672 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T18:53:00,672 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=852664, jitterRate=0.08421845734119415}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T18:53:00,673 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732474380650Initializing all the Stores at 1732474380651 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474380652 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474380652Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474380652Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474380652Cleaning up temporary data from old regions at 1732474380667 (+15 ms)Region opened successfully at 1732474380673 (+6 ms) 2024-11-24T18:53:00,674 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T18:53:00,678 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@667b617b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f2b92657890a/172.17.0.2:0 2024-11-24T18:53:00,679 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T18:53:00,679 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T18:53:00,679 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T18:53:00,679 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T18:53:00,680 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T18:53:00,680 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T18:53:00,680 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T18:53:00,683 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T18:53:00,684 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T18:53:00,724 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T18:53:00,724 INFO [master/f2b92657890a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T18:53:00,726 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T18:53:00,851 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T18:53:00,853 INFO [master/f2b92657890a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T18:53:00,856 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T18:53:00,933 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T18:53:00,935 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T18:53:01,013 DEBUG 
[master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T18:53:01,020 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T18:53:01,092 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T18:53:01,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T18:53:01,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46319-0x1016e3117f40001, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T18:53:01,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:01,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46319-0x1016e3117f40001, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:01,174 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=f2b92657890a,40329,1732474379357, sessionid=0x1016e3117f40000, setting cluster-up flag (Was=false) 2024-11-24T18:53:01,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:01,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46319-0x1016e3117f40001, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:01,476 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T18:53:01,478 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f2b92657890a,40329,1732474379357 2024-11-24T18:53:01,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:01,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46319-0x1016e3117f40001, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:01,528 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T18:53:01,530 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f2b92657890a,40329,1732474379357 2024-11-24T18:53:01,531 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T18:53:01,533 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T18:53:01,534 INFO [master/f2b92657890a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T18:53:01,534 INFO [master/f2b92657890a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T18:53:01,534 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: f2b92657890a,40329,1732474379357 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T18:53:01,536 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/f2b92657890a:0, corePoolSize=5, maxPoolSize=5 2024-11-24T18:53:01,536 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/f2b92657890a:0, corePoolSize=5, maxPoolSize=5 2024-11-24T18:53:01,536 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/f2b92657890a:0, corePoolSize=5, maxPoolSize=5 2024-11-24T18:53:01,536 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/f2b92657890a:0, corePoolSize=5, maxPoolSize=5 2024-11-24T18:53:01,536 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/f2b92657890a:0, corePoolSize=10, maxPoolSize=10 2024-11-24T18:53:01,536 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:01,536 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/f2b92657890a:0, corePoolSize=2, maxPoolSize=2 2024-11-24T18:53:01,536 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/f2b92657890a:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T18:53:01,537 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732474411537 2024-11-24T18:53:01,537 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T18:53:01,537 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T18:53:01,537 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T18:53:01,537 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T18:53:01,538 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T18:53:01,538 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T18:53:01,538 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:01,538 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T18:53:01,538 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T18:53:01,538 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T18:53:01,538 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T18:53:01,538 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T18:53:01,539 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T18:53:01,539 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T18:53:01,539 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.large.0-1732474381539,5,FailOnTimeoutGroup] 2024-11-24T18:53:01,539 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.small.0-1732474381539,5,FailOnTimeoutGroup] 2024-11-24T18:53:01,539 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:01,539 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T18:53:01,539 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:01,540 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:01,540 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:01,540 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T18:53:01,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46539 is added to blk_1073741831_1007 (size=1321) 2024-11-24T18:53:01,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41537 is added to blk_1073741831_1007 (size=1321) 2024-11-24T18:53:01,549 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T18:53:01,550 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424 2024-11-24T18:53:01,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46539 is added to blk_1073741832_1008 (size=32) 2024-11-24T18:53:01,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41537 is added to blk_1073741832_1008 (size=32) 2024-11-24T18:53:01,562 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:53:01,563 INFO [RS:0;f2b92657890a:46319 {}] regionserver.HRegionServer(746): ClusterId : fb304e74-6c5b-4d0b-9658-fa1a93502d99 2024-11-24T18:53:01,564 DEBUG [RS:0;f2b92657890a:46319 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T18:53:01,571 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T18:53:01,572 DEBUG [RS:0;f2b92657890a:46319 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T18:53:01,572 DEBUG [RS:0;f2b92657890a:46319 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T18:53:01,574 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T18:53:01,574 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:01,575 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:53:01,575 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T18:53:01,577 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T18:53:01,577 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:01,578 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:53:01,578 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T18:53:01,581 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T18:53:01,581 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:01,582 DEBUG [RS:0;f2b92657890a:46319 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T18:53:01,582 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:53:01,582 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T18:53:01,583 DEBUG [RS:0;f2b92657890a:46319 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4da86b85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f2b92657890a/172.17.0.2:0 2024-11-24T18:53:01,586 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T18:53:01,586 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:01,591 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:53:01,591 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T18:53:01,593 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/data/hbase/meta/1588230740 2024-11-24T18:53:01,593 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/data/hbase/meta/1588230740 2024-11-24T18:53:01,596 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T18:53:01,596 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T18:53:01,597 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-24T18:53:01,599 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T18:53:01,602 DEBUG [RS:0;f2b92657890a:46319 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;f2b92657890a:46319 2024-11-24T18:53:01,603 INFO [RS:0;f2b92657890a:46319 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T18:53:01,603 INFO [RS:0;f2b92657890a:46319 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T18:53:01,603 DEBUG [RS:0;f2b92657890a:46319 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-24T18:53:01,604 INFO [RS:0;f2b92657890a:46319 {}] regionserver.HRegionServer(2659): reportForDuty to master=f2b92657890a,40329,1732474379357 with port=46319, startcode=1732474380044 2024-11-24T18:53:01,605 DEBUG [RS:0;f2b92657890a:46319 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T18:53:01,609 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T18:53:01,610 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=818082, jitterRate=0.04024602472782135}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T18:53:01,611 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732474381562Initializing all the Stores at 1732474381563 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474381564 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474381568 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474381568Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474381568Cleaning up temporary data from old regions at 1732474381596 (+28 ms)Region opened successfully at 1732474381611 (+15 ms) 2024-11-24T18:53:01,611 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 
1588230740, disabling compactions & flushes 2024-11-24T18:53:01,611 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T18:53:01,611 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T18:53:01,611 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T18:53:01,611 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T18:53:01,617 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T18:53:01,617 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732474381611Disabling compacts and flushes for region at 1732474381611Disabling writes for close at 1732474381611Writing region close event to WAL at 1732474381617 (+6 ms)Closed at 1732474381617 2024-11-24T18:53:01,622 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T18:53:01,623 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T18:53:01,623 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T18:53:01,626 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T18:53:01,628 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T18:53:01,633 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38165, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T18:53:01,634 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40329 {}] master.ServerManager(363): Checking decommissioned status of RegionServer f2b92657890a,46319,1732474380044 2024-11-24T18:53:01,634 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40329 {}] master.ServerManager(517): Registering regionserver=f2b92657890a,46319,1732474380044 2024-11-24T18:53:01,637 DEBUG [RS:0;f2b92657890a:46319 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424 2024-11-24T18:53:01,637 DEBUG [RS:0;f2b92657890a:46319 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40133 2024-11-24T18:53:01,637 DEBUG [RS:0;f2b92657890a:46319 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T18:53:01,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T18:53:01,645 DEBUG 
[RS:0;f2b92657890a:46319 {}] zookeeper.ZKUtil(111): regionserver:46319-0x1016e3117f40001, quorum=127.0.0.1:49562, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/f2b92657890a,46319,1732474380044 2024-11-24T18:53:01,645 WARN [RS:0;f2b92657890a:46319 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T18:53:01,645 INFO [RS:0;f2b92657890a:46319 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T18:53:01,645 DEBUG [RS:0;f2b92657890a:46319 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/WALs/f2b92657890a,46319,1732474380044 2024-11-24T18:53:01,652 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [f2b92657890a,46319,1732474380044] 2024-11-24T18:53:01,656 INFO [RS:0;f2b92657890a:46319 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T18:53:01,660 INFO [RS:0;f2b92657890a:46319 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T18:53:01,664 INFO [RS:0;f2b92657890a:46319 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T18:53:01,665 INFO [RS:0;f2b92657890a:46319 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:01,668 INFO [RS:0;f2b92657890a:46319 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T18:53:01,670 INFO [RS:0;f2b92657890a:46319 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T18:53:01,670 INFO [RS:0;f2b92657890a:46319 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-24T18:53:01,671 DEBUG [RS:0;f2b92657890a:46319 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:01,671 DEBUG [RS:0;f2b92657890a:46319 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:01,671 DEBUG [RS:0;f2b92657890a:46319 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:01,671 DEBUG [RS:0;f2b92657890a:46319 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:01,671 DEBUG [RS:0;f2b92657890a:46319 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:01,671 DEBUG [RS:0;f2b92657890a:46319 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/f2b92657890a:0, corePoolSize=2, maxPoolSize=2 2024-11-24T18:53:01,671 DEBUG [RS:0;f2b92657890a:46319 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:01,672 DEBUG [RS:0;f2b92657890a:46319 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:01,672 DEBUG [RS:0;f2b92657890a:46319 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:01,672 DEBUG [RS:0;f2b92657890a:46319 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:01,672 DEBUG [RS:0;f2b92657890a:46319 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:01,672 DEBUG [RS:0;f2b92657890a:46319 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:01,672 DEBUG [RS:0;f2b92657890a:46319 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/f2b92657890a:0, corePoolSize=3, maxPoolSize=3 2024-11-24T18:53:01,672 DEBUG [RS:0;f2b92657890a:46319 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0, corePoolSize=3, maxPoolSize=3 2024-11-24T18:53:01,673 INFO [RS:0;f2b92657890a:46319 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:01,673 INFO [RS:0;f2b92657890a:46319 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:01,673 INFO [RS:0;f2b92657890a:46319 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:01,673 INFO [RS:0;f2b92657890a:46319 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-24T18:53:01,673 INFO [RS:0;f2b92657890a:46319 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:01,673 INFO [RS:0;f2b92657890a:46319 {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,46319,1732474380044-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T18:53:01,696 INFO [RS:0;f2b92657890a:46319 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T18:53:01,697 INFO [RS:0;f2b92657890a:46319 {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,46319,1732474380044-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:01,697 INFO [RS:0;f2b92657890a:46319 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:01,697 INFO [RS:0;f2b92657890a:46319 {}] regionserver.Replication(171): f2b92657890a,46319,1732474380044 started 2024-11-24T18:53:01,718 INFO [RS:0;f2b92657890a:46319 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:01,718 INFO [RS:0;f2b92657890a:46319 {}] regionserver.HRegionServer(1482): Serving as f2b92657890a,46319,1732474380044, RpcServer on f2b92657890a/172.17.0.2:46319, sessionid=0x1016e3117f40001 2024-11-24T18:53:01,719 DEBUG [RS:0;f2b92657890a:46319 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T18:53:01,719 DEBUG [RS:0;f2b92657890a:46319 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager f2b92657890a,46319,1732474380044 2024-11-24T18:53:01,719 DEBUG [RS:0;f2b92657890a:46319 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f2b92657890a,46319,1732474380044' 2024-11-24T18:53:01,719 DEBUG [RS:0;f2b92657890a:46319 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T18:53:01,720 DEBUG [RS:0;f2b92657890a:46319 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T18:53:01,720 DEBUG [RS:0;f2b92657890a:46319 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T18:53:01,720 DEBUG [RS:0;f2b92657890a:46319 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T18:53:01,720 DEBUG [RS:0;f2b92657890a:46319 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager f2b92657890a,46319,1732474380044 2024-11-24T18:53:01,720 DEBUG [RS:0;f2b92657890a:46319 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f2b92657890a,46319,1732474380044' 2024-11-24T18:53:01,720 DEBUG [RS:0;f2b92657890a:46319 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T18:53:01,721 DEBUG [RS:0;f2b92657890a:46319 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T18:53:01,722 DEBUG [RS:0;f2b92657890a:46319 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T18:53:01,722 INFO [RS:0;f2b92657890a:46319 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T18:53:01,722 INFO [RS:0;f2b92657890a:46319 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-24T18:53:01,778 WARN [f2b92657890a:40329 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-24T18:53:01,825 INFO [RS:0;f2b92657890a:46319 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f2b92657890a%2C46319%2C1732474380044, suffix=, logDir=hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/WALs/f2b92657890a,46319,1732474380044, archiveDir=hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/oldWALs, maxLogs=32 2024-11-24T18:53:01,828 INFO [RS:0;f2b92657890a:46319 {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C46319%2C1732474380044.1732474381827 2024-11-24T18:53:01,837 INFO [RS:0;f2b92657890a:46319 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/WALs/f2b92657890a,46319,1732474380044/f2b92657890a%2C46319%2C1732474380044.1732474381827 2024-11-24T18:53:01,840 DEBUG [RS:0;f2b92657890a:46319 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44969:44969),(127.0.0.1/127.0.0.1:37027:37027)] 2024-11-24T18:53:02,029 DEBUG [f2b92657890a:40329 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T18:53:02,030 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=f2b92657890a,46319,1732474380044 2024-11-24T18:53:02,034 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f2b92657890a,46319,1732474380044, state=OPENING 2024-11-24T18:53:02,086 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T18:53:02,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46319-0x1016e3117f40001, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:02,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:02,098 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T18:53:02,098 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T18:53:02,098 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T18:53:02,098 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=f2b92657890a,46319,1732474380044}] 2024-11-24T18:53:02,255 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T18:53:02,260 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44685, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T18:53:02,266 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T18:53:02,266 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T18:53:02,269 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f2b92657890a%2C46319%2C1732474380044.meta, suffix=.meta, logDir=hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/WALs/f2b92657890a,46319,1732474380044, archiveDir=hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/oldWALs, maxLogs=32 2024-11-24T18:53:02,271 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C46319%2C1732474380044.meta.1732474382271.meta 2024-11-24T18:53:02,278 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/WALs/f2b92657890a,46319,1732474380044/f2b92657890a%2C46319%2C1732474380044.meta.1732474382271.meta 2024-11-24T18:53:02,280 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37027:37027),(127.0.0.1/127.0.0.1:44969:44969)] 2024-11-24T18:53:02,285 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T18:53:02,285 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T18:53:02,285 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T18:53:02,286 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-24T18:53:02,286 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T18:53:02,286 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:53:02,286 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T18:53:02,286 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T18:53:02,289 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T18:53:02,290 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T18:53:02,290 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:02,291 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:53:02,291 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T18:53:02,293 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T18:53:02,293 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:02,294 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:53:02,295 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T18:53:02,296 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T18:53:02,296 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:02,297 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:53:02,297 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T18:53:02,299 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T18:53:02,299 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:02,299 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-24T18:53:02,300 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T18:53:02,301 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/data/hbase/meta/1588230740 2024-11-24T18:53:02,303 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/data/hbase/meta/1588230740 2024-11-24T18:53:02,304 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T18:53:02,304 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T18:53:02,305 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T18:53:02,307 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T18:53:02,308 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=827624, jitterRate=0.05237850546836853}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T18:53:02,308 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T18:53:02,309 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732474382287Writing region info on filesystem at 1732474382287Initializing all the Stores at 1732474382288 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474382288Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474382288Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474382288Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474382288Cleaning up temporary data from old regions at 1732474382304 (+16 ms)Running coprocessor post-open hooks at 1732474382308 (+4 ms)Region opened successfully at 1732474382309 (+1 ms) 2024-11-24T18:53:02,311 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732474382254 2024-11-24T18:53:02,314 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T18:53:02,314 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T18:53:02,316 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=f2b92657890a,46319,1732474380044 2024-11-24T18:53:02,318 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f2b92657890a,46319,1732474380044, state=OPEN 2024-11-24T18:53:02,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46319-0x1016e3117f40001, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T18:53:02,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T18:53:02,457 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=f2b92657890a,46319,1732474380044 2024-11-24T18:53:02,457 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T18:53:02,457 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T18:53:02,462 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T18:53:02,462 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=f2b92657890a,46319,1732474380044 in 359 msec 2024-11-24T18:53:02,466 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T18:53:02,466 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 839 msec 2024-11-24T18:53:02,467 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T18:53:02,467 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T18:53:02,469 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T18:53:02,469 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f2b92657890a,46319,1732474380044, seqNum=-1] 2024-11-24T18:53:02,470 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T18:53:02,472 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53967, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T18:53:02,481 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 945 msec 2024-11-24T18:53:02,481 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732474382481, completionTime=-1 2024-11-24T18:53:02,481 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T18:53:02,481 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T18:53:02,483 INFO [master/f2b92657890a:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T18:53:02,483 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732474442483 2024-11-24T18:53:02,483 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732474502483 2024-11-24T18:53:02,484 INFO [master/f2b92657890a:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-24T18:53:02,484 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,40329,1732474379357-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:02,484 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,40329,1732474379357-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:02,484 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,40329,1732474379357-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:02,484 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-f2b92657890a:40329, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T18:53:02,484 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:02,484 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:02,487 DEBUG [master/f2b92657890a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T18:53:02,490 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.989sec 2024-11-24T18:53:02,490 INFO [master/f2b92657890a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T18:53:02,490 INFO [master/f2b92657890a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T18:53:02,490 INFO [master/f2b92657890a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T18:53:02,490 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T18:53:02,490 INFO [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T18:53:02,490 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,40329,1732474379357-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T18:53:02,490 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,40329,1732474379357-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T18:53:02,493 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T18:53:02,493 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T18:53:02,493 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,40329,1732474379357-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T18:53:02,564 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3aadf114, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T18:53:02,564 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request f2b92657890a,40329,-1 for getting cluster id 2024-11-24T18:53:02,565 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T18:53:02,567 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'fb304e74-6c5b-4d0b-9658-fa1a93502d99' 2024-11-24T18:53:02,568 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T18:53:02,568 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "fb304e74-6c5b-4d0b-9658-fa1a93502d99" 2024-11-24T18:53:02,568 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@657bf00c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T18:53:02,569 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f2b92657890a,40329,-1] 2024-11-24T18:53:02,569 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T18:53:02,569 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:53:02,571 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53784, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T18:53:02,573 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@510cc70d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T18:53:02,574 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T18:53:02,575 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f2b92657890a,46319,1732474380044, seqNum=-1] 2024-11-24T18:53:02,576 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T18:53:02,578 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52396, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T18:53:02,580 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=f2b92657890a,40329,1732474379357 2024-11-24T18:53:02,581 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:53:02,586 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T18:53:02,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T18:53:02,586 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T18:53:02,587 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T18:53:02,587 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:53:02,587 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:53:02,587 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-24T18:53:02,587 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T18:53:02,587 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=355378974, stopped=false 2024-11-24T18:53:02,587 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=f2b92657890a,40329,1732474379357 2024-11-24T18:53:02,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T18:53:02,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46319-0x1016e3117f40001, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T18:53:02,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:02,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46319-0x1016e3117f40001, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:02,611 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T18:53:02,611 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-24T18:53:02,611 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T18:53:02,611 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:53:02,611 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46319-0x1016e3117f40001, quorum=127.0.0.1:49562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T18:53:02,611 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'f2b92657890a,46319,1732474380044' ***** 2024-11-24T18:53:02,611 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T18:53:02,612 INFO [RS:0;f2b92657890a:46319 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T18:53:02,612 INFO [RS:0;f2b92657890a:46319 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T18:53:02,612 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T18:53:02,612 INFO [RS:0;f2b92657890a:46319 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T18:53:02,612 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T18:53:02,612 INFO [RS:0;f2b92657890a:46319 {}] regionserver.HRegionServer(959): stopping server f2b92657890a,46319,1732474380044 2024-11-24T18:53:02,612 INFO [RS:0;f2b92657890a:46319 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T18:53:02,612 INFO [RS:0;f2b92657890a:46319 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;f2b92657890a:46319. 2024-11-24T18:53:02,612 DEBUG [RS:0;f2b92657890a:46319 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T18:53:02,612 DEBUG [RS:0;f2b92657890a:46319 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:53:02,612 INFO [RS:0;f2b92657890a:46319 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-24T18:53:02,612 INFO [RS:0;f2b92657890a:46319 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T18:53:02,612 INFO [RS:0;f2b92657890a:46319 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T18:53:02,613 INFO [RS:0;f2b92657890a:46319 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T18:53:02,613 INFO [RS:0;f2b92657890a:46319 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-24T18:53:02,613 DEBUG [RS:0;f2b92657890a:46319 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-24T18:53:02,613 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T18:53:02,613 DEBUG [RS:0;f2b92657890a:46319 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-24T18:53:02,613 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T18:53:02,613 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T18:53:02,613 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T18:53:02,613 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T18:53:02,613 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-24T18:53:02,635 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/data/hbase/meta/1588230740/.tmp/ns/4e5b59c0bf4c4c93991d6a5ef7d51938 is 43, key is default/ns:d/1732474382472/Put/seqid=0 2024-11-24T18:53:02,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41537 is added to blk_1073741835_1011 (size=5153) 2024-11-24T18:53:02,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46539 is added to blk_1073741835_1011 (size=5153) 2024-11-24T18:53:02,643 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/data/hbase/meta/1588230740/.tmp/ns/4e5b59c0bf4c4c93991d6a5ef7d51938 2024-11-24T18:53:02,651 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/data/hbase/meta/1588230740/.tmp/ns/4e5b59c0bf4c4c93991d6a5ef7d51938 as hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/data/hbase/meta/1588230740/ns/4e5b59c0bf4c4c93991d6a5ef7d51938 2024-11-24T18:53:02,661 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/data/hbase/meta/1588230740/ns/4e5b59c0bf4c4c93991d6a5ef7d51938, entries=2, sequenceid=6, filesize=5.0 K 2024-11-24T18:53:02,662 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 49ms, sequenceid=6, compaction requested=false 2024-11-24T18:53:02,663 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T18:53:02,669 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-24T18:53:02,670 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T18:53:02,670 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T18:53:02,670 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732474382613Running coprocessor pre-close hooks at 1732474382613Disabling compacts and flushes for region at 1732474382613Disabling writes for close at 1732474382613Obtaining lock to block concurrent updates at 1732474382613Preparing flush snapshotting stores in 1588230740 at 1732474382613Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732474382614 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732474382615 (+1 ms)Flushing 1588230740/ns: creating writer at 1732474382615Flushing 1588230740/ns: appending metadata at 1732474382635 (+20 ms)Flushing 1588230740/ns: closing flushed file at 1732474382635Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@415fd168: reopening flushed file at 1732474382650 (+15 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 49ms, sequenceid=6, compaction requested=false at 1732474382663 (+13 ms)Writing region close event to WAL at 1732474382665 (+2 ms)Running coprocessor post-close hooks at 1732474382670 (+5 ms)Closed at 1732474382670 2024-11-24T18:53:02,671 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T18:53:02,691 INFO [regionserver/f2b92657890a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-24T18:53:02,692 INFO [regionserver/f2b92657890a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-24T18:53:02,813 INFO [RS:0;f2b92657890a:46319 {}] regionserver.HRegionServer(976): stopping server f2b92657890a,46319,1732474380044; all regions closed. 
2024-11-24T18:53:02,814 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:02,814 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:02,814 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:02,814 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:02,815 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:02,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46539 is added to blk_1073741834_1010 (size=1152) 2024-11-24T18:53:02,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41537 is added to blk_1073741834_1010 (size=1152) 2024-11-24T18:53:02,821 DEBUG [RS:0;f2b92657890a:46319 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/oldWALs 2024-11-24T18:53:02,822 INFO [RS:0;f2b92657890a:46319 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f2b92657890a%2C46319%2C1732474380044.meta:.meta(num 1732474382271) 2024-11-24T18:53:02,822 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:02,822 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:02,822 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:02,822 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:02,823 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:02,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41537 is added to blk_1073741833_1009 (size=93) 2024-11-24T18:53:02,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46539 is added to blk_1073741833_1009 (size=93) 2024-11-24T18:53:02,828 DEBUG [RS:0;f2b92657890a:46319 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/oldWALs 2024-11-24T18:53:02,828 INFO [RS:0;f2b92657890a:46319 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f2b92657890a%2C46319%2C1732474380044:(num 1732474381827) 2024-11-24T18:53:02,828 DEBUG [RS:0;f2b92657890a:46319 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:53:02,828 INFO [RS:0;f2b92657890a:46319 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T18:53:02,828 INFO [RS:0;f2b92657890a:46319 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T18:53:02,828 INFO [RS:0;f2b92657890a:46319 {}] hbase.ChoreService(370): Chore service for: regionserver/f2b92657890a:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T18:53:02,828 INFO [RS:0;f2b92657890a:46319 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T18:53:02,828 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T18:53:02,828 INFO [RS:0;f2b92657890a:46319 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46319 2024-11-24T18:53:02,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T18:53:02,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46319-0x1016e3117f40001, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/f2b92657890a,46319,1732474380044 2024-11-24T18:53:02,842 INFO [RS:0;f2b92657890a:46319 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T18:53:02,854 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [f2b92657890a,46319,1732474380044] 2024-11-24T18:53:02,865 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/f2b92657890a,46319,1732474380044 already deleted, retry=false 2024-11-24T18:53:02,865 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; f2b92657890a,46319,1732474380044 expired; onlineServers=0 2024-11-24T18:53:02,865 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'f2b92657890a,40329,1732474379357' ***** 2024-11-24T18:53:02,865 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T18:53:02,865 INFO [M:0;f2b92657890a:40329 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T18:53:02,865 INFO [M:0;f2b92657890a:40329 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T18:53:02,865 DEBUG [M:0;f2b92657890a:40329 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T18:53:02,866 DEBUG [M:0;f2b92657890a:40329 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T18:53:02,866 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T18:53:02,866 DEBUG [master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.large.0-1732474381539 {}] cleaner.HFileCleaner(306): Exit Thread[master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.large.0-1732474381539,5,FailOnTimeoutGroup] 2024-11-24T18:53:02,866 DEBUG [master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.small.0-1732474381539 {}] cleaner.HFileCleaner(306): Exit Thread[master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.small.0-1732474381539,5,FailOnTimeoutGroup] 2024-11-24T18:53:02,866 INFO [M:0;f2b92657890a:40329 {}] hbase.ChoreService(370): Chore service for: master/f2b92657890a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T18:53:02,866 INFO [M:0;f2b92657890a:40329 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T18:53:02,866 DEBUG [M:0;f2b92657890a:40329 {}] master.HMaster(1795): Stopping service threads 2024-11-24T18:53:02,866 INFO [M:0;f2b92657890a:40329 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T18:53:02,866 INFO [M:0;f2b92657890a:40329 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T18:53:02,866 INFO [M:0;f2b92657890a:40329 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T18:53:02,866 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T18:53:02,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T18:53:02,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:02,875 DEBUG [M:0;f2b92657890a:40329 {}] zookeeper.ZKUtil(347): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T18:53:02,876 WARN [M:0;f2b92657890a:40329 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T18:53:02,876 INFO [M:0;f2b92657890a:40329 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/.lastflushedseqids 2024-11-24T18:53:02,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46539 is added to blk_1073741836_1012 (size=99) 2024-11-24T18:53:02,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41537 is added to blk_1073741836_1012 (size=99) 2024-11-24T18:53:02,884 INFO [M:0;f2b92657890a:40329 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T18:53:02,884 INFO [M:0;f2b92657890a:40329 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T18:53:02,884 DEBUG [M:0;f2b92657890a:40329 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T18:53:02,884 INFO [M:0;f2b92657890a:40329 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:53:02,884 DEBUG [M:0;f2b92657890a:40329 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:53:02,884 DEBUG [M:0;f2b92657890a:40329 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T18:53:02,884 DEBUG [M:0;f2b92657890a:40329 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:53:02,884 INFO [M:0;f2b92657890a:40329 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-24T18:53:02,900 DEBUG [M:0;f2b92657890a:40329 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d52e1f7983e44ddc866075226b3a0229 is 82, key is hbase:meta,,1/info:regioninfo/1732474382316/Put/seqid=0 2024-11-24T18:53:02,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41537 is added to blk_1073741837_1013 (size=5672) 2024-11-24T18:53:02,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46539 is added to blk_1073741837_1013 (size=5672) 2024-11-24T18:53:02,906 INFO [M:0;f2b92657890a:40329 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d52e1f7983e44ddc866075226b3a0229 2024-11-24T18:53:02,928 DEBUG [M:0;f2b92657890a:40329 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/aad60045ea9a4cd0b5aa63f3d5e0c281 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732474382479/Put/seqid=0 2024-11-24T18:53:02,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41537 is added to blk_1073741838_1014 (size=5275) 2024-11-24T18:53:02,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46539 is added to blk_1073741838_1014 (size=5275) 2024-11-24T18:53:02,935 INFO [M:0;f2b92657890a:40329 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/aad60045ea9a4cd0b5aa63f3d5e0c281 2024-11-24T18:53:02,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46319-0x1016e3117f40001, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T18:53:02,955 INFO [RS:0;f2b92657890a:46319 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T18:53:02,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46319-0x1016e3117f40001, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 
2024-11-24T18:53:02,955 INFO [RS:0;f2b92657890a:46319 {}] regionserver.HRegionServer(1031): Exiting; stopping=f2b92657890a,46319,1732474380044; zookeeper connection closed. 2024-11-24T18:53:02,955 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@77578224 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@77578224 2024-11-24T18:53:02,955 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T18:53:02,955 DEBUG [M:0;f2b92657890a:40329 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2420c47eaba04353b8a3fa6f2aa10020 is 69, key is f2b92657890a,46319,1732474380044/rs:state/1732474381634/Put/seqid=0 2024-11-24T18:53:02,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41537 is added to blk_1073741839_1015 (size=5156) 2024-11-24T18:53:02,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46539 is added to blk_1073741839_1015 (size=5156) 2024-11-24T18:53:02,961 INFO [M:0;f2b92657890a:40329 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2420c47eaba04353b8a3fa6f2aa10020 2024-11-24T18:53:02,982 DEBUG [M:0;f2b92657890a:40329 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/118e1b4c4d70479a93b9380ae1246c36 is 52, key is load_balancer_on/state:d/1732474382585/Put/seqid=0 2024-11-24T18:53:02,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46539 is added to blk_1073741840_1016 (size=5056) 2024-11-24T18:53:02,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41537 is added to blk_1073741840_1016 (size=5056) 2024-11-24T18:53:02,988 INFO [M:0;f2b92657890a:40329 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/118e1b4c4d70479a93b9380ae1246c36 2024-11-24T18:53:02,994 DEBUG [M:0;f2b92657890a:40329 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d52e1f7983e44ddc866075226b3a0229 as hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d52e1f7983e44ddc866075226b3a0229 2024-11-24T18:53:03,001 INFO [M:0;f2b92657890a:40329 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d52e1f7983e44ddc866075226b3a0229, entries=8, sequenceid=29, filesize=5.5 K 2024-11-24T18:53:03,002 DEBUG 
[M:0;f2b92657890a:40329 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/aad60045ea9a4cd0b5aa63f3d5e0c281 as hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/aad60045ea9a4cd0b5aa63f3d5e0c281 2024-11-24T18:53:03,009 INFO [M:0;f2b92657890a:40329 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/aad60045ea9a4cd0b5aa63f3d5e0c281, entries=3, sequenceid=29, filesize=5.2 K 2024-11-24T18:53:03,011 DEBUG [M:0;f2b92657890a:40329 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2420c47eaba04353b8a3fa6f2aa10020 as hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2420c47eaba04353b8a3fa6f2aa10020 2024-11-24T18:53:03,017 INFO [M:0;f2b92657890a:40329 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2420c47eaba04353b8a3fa6f2aa10020, entries=1, sequenceid=29, filesize=5.0 K 2024-11-24T18:53:03,019 DEBUG [M:0;f2b92657890a:40329 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/118e1b4c4d70479a93b9380ae1246c36 as hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/118e1b4c4d70479a93b9380ae1246c36 2024-11-24T18:53:03,024 INFO [M:0;f2b92657890a:40329 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40133/user/jenkins/test-data/e2181a76-a6ea-cbf4-6f65-5ed5cb1b2424/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/118e1b4c4d70479a93b9380ae1246c36, entries=1, sequenceid=29, filesize=4.9 K 2024-11-24T18:53:03,025 INFO [M:0;f2b92657890a:40329 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 141ms, sequenceid=29, compaction requested=false 2024-11-24T18:53:03,027 INFO [M:0;f2b92657890a:40329 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:53:03,027 DEBUG [M:0;f2b92657890a:40329 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732474382884Disabling compacts and flushes for region at 1732474382884Disabling writes for close at 1732474382884Obtaining lock to block concurrent updates at 1732474382884Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732474382884Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732474382885 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732474382885Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732474382886 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732474382900 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732474382900Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732474382912 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732474382928 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732474382928Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732474382941 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732474382954 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732474382955 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732474382967 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732474382982 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732474382982Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4cc397ec: reopening flushed file at 1732474382993 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@661823db: reopening flushed file at 1732474383001 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3c84163b: reopening flushed file at 1732474383010 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1957b86d: reopening flushed file at 1732474383018 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 141ms, sequenceid=29, compaction requested=false at 1732474383025 (+7 ms)Writing region close event to WAL at 1732474383027 (+2 ms)Closed at 1732474383027 2024-11-24T18:53:03,027 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:03,027 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:03,027 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:03,028 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:03,028 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:03,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46539 is added to blk_1073741830_1006 (size=10311) 2024-11-24T18:53:03,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41537 is added to blk_1073741830_1006 (size=10311) 2024-11-24T18:53:03,031 INFO [M:0;f2b92657890a:40329 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-24T18:53:03,031 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T18:53:03,031 INFO [M:0;f2b92657890a:40329 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40329 2024-11-24T18:53:03,031 INFO [M:0;f2b92657890a:40329 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T18:53:03,144 INFO [M:0;f2b92657890a:40329 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T18:53:03,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T18:53:03,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40329-0x1016e3117f40000, quorum=127.0.0.1:49562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T18:53:03,149 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@668848ff{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:53:03,150 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1dca50a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T18:53:03,151 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T18:53:03,151 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@17d00685{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T18:53:03,151 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@573af0f2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/hadoop.log.dir/,STOPPED} 2024-11-24T18:53:03,154 WARN [BP-1416387336-172.17.0.2-1732474376887 heartbeating to localhost/127.0.0.1:40133 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T18:53:03,154 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T18:53:03,155 WARN [BP-1416387336-172.17.0.2-1732474376887 heartbeating to localhost/127.0.0.1:40133 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1416387336-172.17.0.2-1732474376887 (Datanode Uuid 193933c0-9521-4621-ab64-d339023ba983) service to localhost/127.0.0.1:40133 2024-11-24T18:53:03,155 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T18:53:03,156 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/cluster_7fffde26-21e2-9b66-c37c-f0466c01c8ba/data/data3/current/BP-1416387336-172.17.0.2-1732474376887 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:53:03,156 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/cluster_7fffde26-21e2-9b66-c37c-f0466c01c8ba/data/data4/current/BP-1416387336-172.17.0.2-1732474376887 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:53:03,156 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T18:53:03,159 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@54b1fb62{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:53:03,160 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@38966cd1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T18:53:03,160 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T18:53:03,160 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21e00560{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T18:53:03,160 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e7873b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/hadoop.log.dir/,STOPPED} 2024-11-24T18:53:03,162 WARN [BP-1416387336-172.17.0.2-1732474376887 heartbeating to localhost/127.0.0.1:40133 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T18:53:03,162 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T18:53:03,162 WARN [BP-1416387336-172.17.0.2-1732474376887 heartbeating to localhost/127.0.0.1:40133 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1416387336-172.17.0.2-1732474376887 (Datanode Uuid 90a21d30-efc4-4c54-85aa-c7be11f6cb62) service to localhost/127.0.0.1:40133 2024-11-24T18:53:03,162 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T18:53:03,162 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/cluster_7fffde26-21e2-9b66-c37c-f0466c01c8ba/data/data1/current/BP-1416387336-172.17.0.2-1732474376887 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:53:03,163 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/cluster_7fffde26-21e2-9b66-c37c-f0466c01c8ba/data/data2/current/BP-1416387336-172.17.0.2-1732474376887 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:53:03,163 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T18:53:03,169 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3fa5684d{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T18:53:03,170 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7bd218c7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T18:53:03,170 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T18:53:03,171 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2152d149{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T18:53:03,171 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a15ed6a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/hadoop.log.dir/,STOPPED} 2024-11-24T18:53:03,176 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T18:53:03,194 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T18:53:03,194 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T18:53:03,194 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/hadoop.log.dir so I do NOT create it in target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268 2024-11-24T18:53:03,194 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a73be3e4-71ec-b7d1-ab6a-a202f4a80da2/hadoop.tmp.dir so I do NOT create it in target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268 2024-11-24T18:53:03,194 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93, deleteOnExit=true 2024-11-24T18:53:03,194 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T18:53:03,195 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/test.cache.data in system properties and HBase conf 2024-11-24T18:53:03,195 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T18:53:03,195 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/hadoop.log.dir in system properties and HBase conf 2024-11-24T18:53:03,195 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T18:53:03,195 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T18:53:03,195 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T18:53:03,195 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-24T18:53:03,195 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T18:53:03,195 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T18:53:03,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T18:53:03,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T18:53:03,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T18:53:03,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T18:53:03,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T18:53:03,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T18:53:03,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T18:53:03,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/nfs.dump.dir in system properties and HBase conf 2024-11-24T18:53:03,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/java.io.tmpdir in system properties and HBase conf 2024-11-24T18:53:03,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T18:53:03,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T18:53:03,197 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T18:53:03,208 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T18:53:03,494 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:53:03,500 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T18:53:03,501 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T18:53:03,501 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T18:53:03,501 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T18:53:03,502 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:53:03,502 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3150e6db{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/hadoop.log.dir/,AVAILABLE} 2024-11-24T18:53:03,502 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1d790455{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T18:53:03,596 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7982676d{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/java.io.tmpdir/jetty-localhost-33831-hadoop-hdfs-3_4_1-tests_jar-_-any-1767077153202478437/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T18:53:03,597 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2efbdc75{HTTP/1.1, (http/1.1)}{localhost:33831} 2024-11-24T18:53:03,597 INFO [Time-limited test {}] server.Server(415): Started @112949ms 2024-11-24T18:53:03,609 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T18:53:03,674 INFO [regionserver/f2b92657890a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T18:53:03,866 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:53:03,871 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T18:53:03,872 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T18:53:03,872 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T18:53:03,872 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T18:53:03,872 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1bf32f74{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/hadoop.log.dir/,AVAILABLE} 2024-11-24T18:53:03,873 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1bb5d847{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T18:53:03,965 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5538b075{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/java.io.tmpdir/jetty-localhost-41003-hadoop-hdfs-3_4_1-tests_jar-_-any-3249673865369982628/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:53:03,965 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3f87a993{HTTP/1.1, (http/1.1)}{localhost:41003} 2024-11-24T18:53:03,965 INFO [Time-limited test {}] server.Server(415): Started @113318ms 2024-11-24T18:53:03,966 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T18:53:03,992 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:53:03,995 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T18:53:03,996 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T18:53:03,996 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T18:53:03,996 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T18:53:03,996 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32403ac6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/hadoop.log.dir/,AVAILABLE} 2024-11-24T18:53:03,997 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4bb19ef9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T18:53:04,089 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@272348fe{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/java.io.tmpdir/jetty-localhost-40073-hadoop-hdfs-3_4_1-tests_jar-_-any-4731090979197747710/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:53:04,089 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@10b53169{HTTP/1.1, (http/1.1)}{localhost:40073} 2024-11-24T18:53:04,090 INFO [Time-limited test {}] server.Server(415): Started @113442ms 2024-11-24T18:53:04,091 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T18:53:05,489 WARN [Thread-673 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data2/current/BP-829498211-172.17.0.2-1732474383219/current, will proceed with Du for space computation calculation, 2024-11-24T18:53:05,489 WARN [Thread-672 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data1/current/BP-829498211-172.17.0.2-1732474383219/current, will proceed with Du for space computation calculation, 2024-11-24T18:53:05,506 WARN [Thread-636 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T18:53:05,509 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3ed014bfb12f281c with lease ID 0xddbbb5e26caa0120: Processing first storage report for DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc from datanode DatanodeRegistration(127.0.0.1:32783, datanodeUuid=a91e4357-e48c-4cf9-a1b5-74d0a339fe61, infoPort=36183, infoSecurePort=0, ipcPort=36637, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219) 2024-11-24T18:53:05,509 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3ed014bfb12f281c with lease ID 0xddbbb5e26caa0120: from storage DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc node DatanodeRegistration(127.0.0.1:32783, datanodeUuid=a91e4357-e48c-4cf9-a1b5-74d0a339fe61, infoPort=36183, infoSecurePort=0, ipcPort=36637, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:53:05,509 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3ed014bfb12f281c with lease ID 0xddbbb5e26caa0120: Processing first storage report for DS-1c4dae80-49d6-48ca-a55e-060435241a70 from datanode DatanodeRegistration(127.0.0.1:32783, datanodeUuid=a91e4357-e48c-4cf9-a1b5-74d0a339fe61, infoPort=36183, infoSecurePort=0, ipcPort=36637, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219) 2024-11-24T18:53:05,509 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3ed014bfb12f281c with lease ID 0xddbbb5e26caa0120: from storage DS-1c4dae80-49d6-48ca-a55e-060435241a70 node DatanodeRegistration(127.0.0.1:32783, datanodeUuid=a91e4357-e48c-4cf9-a1b5-74d0a339fe61, infoPort=36183, infoSecurePort=0, ipcPort=36637, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:53:05,630 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data3/current/BP-829498211-172.17.0.2-1732474383219/current, will proceed with Du for space computation calculation, 2024-11-24T18:53:05,631 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data4/current/BP-829498211-172.17.0.2-1732474383219/current, will proceed with Du for space computation calculation, 2024-11-24T18:53:05,650 WARN [Thread-659 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T18:53:05,652 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2e95ee3367bb8239 with lease ID 0xddbbb5e26caa0121: Processing first storage report for DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e from datanode DatanodeRegistration(127.0.0.1:32973, datanodeUuid=adeb25d7-9417-4898-a7a7-6d9e033f0ea2, infoPort=35465, infoSecurePort=0, ipcPort=41587, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219) 2024-11-24T18:53:05,652 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2e95ee3367bb8239 with lease ID 0xddbbb5e26caa0121: from storage DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e node DatanodeRegistration(127.0.0.1:32973, datanodeUuid=adeb25d7-9417-4898-a7a7-6d9e033f0ea2, infoPort=35465, infoSecurePort=0, ipcPort=41587, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:53:05,652 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2e95ee3367bb8239 with lease ID 0xddbbb5e26caa0121: Processing first storage report for DS-dae4e14c-1528-4a86-9b43-100d74912dd0 from datanode DatanodeRegistration(127.0.0.1:32973, datanodeUuid=adeb25d7-9417-4898-a7a7-6d9e033f0ea2, infoPort=35465, infoSecurePort=0, ipcPort=41587, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219) 2024-11-24T18:53:05,652 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2e95ee3367bb8239 with lease ID 0xddbbb5e26caa0121: from storage DS-dae4e14c-1528-4a86-9b43-100d74912dd0 node DatanodeRegistration(127.0.0.1:32973, datanodeUuid=adeb25d7-9417-4898-a7a7-6d9e033f0ea2, infoPort=35465, infoSecurePort=0, ipcPort=41587, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:53:05,738 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268 2024-11-24T18:53:05,742 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/zookeeper_0, clientPort=49552, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T18:53:05,743 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49552 2024-11-24T18:53:05,743 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:53:05,744 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:53:05,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741825_1001 (size=7) 2024-11-24T18:53:05,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32783 is added to blk_1073741825_1001 (size=7) 2024-11-24T18:53:05,755 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98 with version=8 2024-11-24T18:53:05,755 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/hbase-staging 2024-11-24T18:53:05,757 INFO [Time-limited test {}] client.ConnectionUtils(128): master/f2b92657890a:0 server-side Connection retries=45 2024-11-24T18:53:05,757 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:53:05,757 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T18:53:05,757 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T18:53:05,757 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:53:05,757 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T18:53:05,757 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T18:53:05,758 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T18:53:05,758 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41719 2024-11-24T18:53:05,760 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41719 connecting to ZooKeeper ensemble=127.0.0.1:49552 2024-11-24T18:53:05,822 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:417190x0, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T18:53:05,822 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41719-0x1016e3130f80000 connected 2024-11-24T18:53:05,907 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:53:05,909 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:53:05,911 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T18:53:05,911 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98, hbase.cluster.distributed=false 2024-11-24T18:53:05,913 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T18:53:05,914 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41719 2024-11-24T18:53:05,914 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41719 2024-11-24T18:53:05,914 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41719 2024-11-24T18:53:05,915 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41719 2024-11-24T18:53:05,915 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41719 2024-11-24T18:53:05,930 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/f2b92657890a:0 server-side Connection retries=45 2024-11-24T18:53:05,930 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:53:05,930 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T18:53:05,930 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T18:53:05,930 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:53:05,930 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T18:53:05,931 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T18:53:05,931 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T18:53:05,932 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45405 2024-11-24T18:53:05,933 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45405 connecting to ZooKeeper ensemble=127.0.0.1:49552 2024-11-24T18:53:05,934 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:53:05,936 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:53:05,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:454050x0, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T18:53:05,950 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:454050x0, quorum=127.0.0.1:49552, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T18:53:05,950 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45405-0x1016e3130f80001 connected 2024-11-24T18:53:05,950 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T18:53:05,951 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T18:53:05,951 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45405-0x1016e3130f80001, quorum=127.0.0.1:49552, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T18:53:05,952 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45405-0x1016e3130f80001, quorum=127.0.0.1:49552, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T18:53:05,953 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45405 2024-11-24T18:53:05,953 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45405 2024-11-24T18:53:05,953 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45405 2024-11-24T18:53:05,954 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45405 2024-11-24T18:53:05,954 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45405 2024-11-24T18:53:05,965 DEBUG [M:0;f2b92657890a:41719 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;f2b92657890a:41719 2024-11-24T18:53:05,965 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/f2b92657890a,41719,1732474385757 2024-11-24T18:53:05,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T18:53:05,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x1016e3130f80001, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T18:53:05,970 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/f2b92657890a,41719,1732474385757 2024-11-24T18:53:05,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x1016e3130f80001, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T18:53:05,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:05,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x1016e3130f80001, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:05,981 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T18:53:05,982 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/f2b92657890a,41719,1732474385757 from backup master directory 2024-11-24T18:53:05,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/f2b92657890a,41719,1732474385757 2024-11-24T18:53:05,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x1016e3130f80001, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T18:53:05,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T18:53:05,991 WARN [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-24T18:53:05,991 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=f2b92657890a,41719,1732474385757 2024-11-24T18:53:05,997 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/hbase.id] with ID: 2ae8e624-f6fd-46d8-904d-a5c05fcfc37e 2024-11-24T18:53:05,998 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/.tmp/hbase.id 2024-11-24T18:53:06,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741826_1002 (size=42) 2024-11-24T18:53:06,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32783 is added to blk_1073741826_1002 (size=42) 2024-11-24T18:53:06,005 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/.tmp/hbase.id]:[hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/hbase.id] 2024-11-24T18:53:06,017 INFO [master/f2b92657890a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:53:06,017 INFO [master/f2b92657890a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T18:53:06,019 INFO [master/f2b92657890a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
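By this point the restarted minicluster's ZooKeeper is listening on client port 49552, the master RPC server is bound to port 41719, and the master has registered itself as active and written the cluster ID file. For orientation only: a client inside the same JVM would reach this cluster by pointing its configuration at that ZooKeeper quorum, roughly as sketched below. The sketch uses the standard HBase client API; the port is taken from the MiniZooKeeperCluster entry above, and everything else is illustrative rather than code from this run.

```java
// Illustrative sketch only; standard HBase client API, not code from this run.
// The ZooKeeper client port (49552) is copied from the MiniZooKeeperCluster
// entry above; a real test would take the Configuration from HBaseTestingUtil.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MiniClusterClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "49552");

    // Open a connection through the locally running minicluster's ZooKeeper
    // and read back the cluster ID that the master wrote to hbase.id.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
    }
  }
}
```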
2024-11-24T18:53:06,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:06,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x1016e3130f80001, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:06,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32783 is added to blk_1073741827_1003 (size=196) 2024-11-24T18:53:06,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741827_1003 (size=196) 2024-11-24T18:53:06,039 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T18:53:06,040 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T18:53:06,040 INFO [master/f2b92657890a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T18:53:06,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741828_1004 (size=1189) 2024-11-24T18:53:06,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32783 is added to blk_1073741828_1004 (size=1189) 2024-11-24T18:53:06,051 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/data/master/store 2024-11-24T18:53:06,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741829_1005 (size=34) 2024-11-24T18:53:06,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32783 is added to blk_1073741829_1005 (size=34) 2024-11-24T18:53:06,060 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:53:06,061 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T18:53:06,061 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:53:06,061 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:53:06,061 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T18:53:06,061 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:53:06,061 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
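The 'master:store' descriptor dumped above (families info, proc, rs and state, each with its own versions, bloom filter, block size and data-block encoding) is the same kind of object that tests and admin tooling build programmatically. Below is a minimal sketch of expressing the 'info' family's settings with the public descriptor builders; it is illustrative only, assumes the standard HBase 2.x/3.x client API, and is not the code path the master itself uses to create its local store region.

```java
// Illustrative sketch only; standard TableDescriptorBuilder /
// ColumnFamilyDescriptorBuilder API. The values mirror the 'info' family of the
// master:store descriptor logged above; this is not the master's own code path.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  public static void main(String[] args) {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                    // VERSIONS => '3'
        .setInMemory(true)                                    // IN_MEMORY => 'true'
        .setBlocksize(8 * 1024)                               // BLOCKSIZE => 8 KB
        .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
        .build();

    // Build (but do not create) a descriptor shaped like the logged one.
    TableDescriptor store = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info)
        .build();

    System.out.println(store);
  }
}
```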
2024-11-24T18:53:06,061 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732474386061Disabling compacts and flushes for region at 1732474386061Disabling writes for close at 1732474386061Writing region close event to WAL at 1732474386061Closed at 1732474386061 2024-11-24T18:53:06,062 WARN [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/data/master/store/.initializing 2024-11-24T18:53:06,063 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/WALs/f2b92657890a,41719,1732474385757 2024-11-24T18:53:06,066 INFO [master/f2b92657890a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f2b92657890a%2C41719%2C1732474385757, suffix=, logDir=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/WALs/f2b92657890a,41719,1732474385757, archiveDir=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/oldWALs, maxLogs=10 2024-11-24T18:53:06,067 INFO [master/f2b92657890a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C41719%2C1732474385757.1732474386067 2024-11-24T18:53:06,073 INFO [master/f2b92657890a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/WALs/f2b92657890a,41719,1732474385757/f2b92657890a%2C41719%2C1732474385757.1732474386067 2024-11-24T18:53:06,074 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35465:35465),(127.0.0.1/127.0.0.1:36183:36183)] 2024-11-24T18:53:06,075 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T18:53:06,075 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:53:06,076 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:06,076 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:06,078 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:06,079 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T18:53:06,080 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:06,080 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:53:06,080 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:06,082 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T18:53:06,082 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:06,083 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T18:53:06,083 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:06,085 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T18:53:06,085 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:06,085 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T18:53:06,086 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:06,087 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T18:53:06,087 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:06,088 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T18:53:06,088 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:06,089 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:06,090 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:06,091 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:06,091 DEBUG [master/f2b92657890a:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:06,092 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T18:53:06,093 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:06,095 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T18:53:06,096 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=733095, jitterRate=-0.06782263517379761}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T18:53:06,097 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732474386076Initializing all the Stores at 1732474386077 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474386077Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474386077Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474386077Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474386077Cleaning up temporary data from old regions at 1732474386091 (+14 ms)Region opened successfully at 1732474386097 (+6 ms) 2024-11-24T18:53:06,097 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T18:53:06,100 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5df7e637, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f2b92657890a/172.17.0.2:0 2024-11-24T18:53:06,101 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T18:53:06,102 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T18:53:06,102 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T18:53:06,102 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T18:53:06,103 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T18:53:06,103 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T18:53:06,103 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T18:53:06,105 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T18:53:06,106 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T18:53:06,117 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T18:53:06,118 INFO [master/f2b92657890a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T18:53:06,119 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T18:53:06,128 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T18:53:06,128 INFO [master/f2b92657890a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T18:53:06,130 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T18:53:06,138 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T18:53:06,139 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T18:53:06,149 DEBUG 
[master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T18:53:06,152 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T18:53:06,159 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T18:53:06,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T18:53:06,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x1016e3130f80001, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T18:53:06,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x1016e3130f80001, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:06,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:06,170 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=f2b92657890a,41719,1732474385757, sessionid=0x1016e3130f80000, setting cluster-up flag (Was=false) 2024-11-24T18:53:06,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:06,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x1016e3130f80001, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:06,223 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T18:53:06,224 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f2b92657890a,41719,1732474385757 2024-11-24T18:53:06,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:06,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x1016e3130f80001, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:06,275 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T18:53:06,276 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f2b92657890a,41719,1732474385757 2024-11-24T18:53:06,278 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T18:53:06,280 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T18:53:06,281 INFO [master/f2b92657890a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T18:53:06,281 INFO [master/f2b92657890a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T18:53:06,281 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: f2b92657890a,41719,1732474385757 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T18:53:06,282 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/f2b92657890a:0, corePoolSize=5, maxPoolSize=5 2024-11-24T18:53:06,283 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/f2b92657890a:0, corePoolSize=5, maxPoolSize=5 2024-11-24T18:53:06,283 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/f2b92657890a:0, corePoolSize=5, maxPoolSize=5 2024-11-24T18:53:06,283 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/f2b92657890a:0, corePoolSize=5, maxPoolSize=5 2024-11-24T18:53:06,283 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/f2b92657890a:0, corePoolSize=10, maxPoolSize=10 2024-11-24T18:53:06,283 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:06,283 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/f2b92657890a:0, corePoolSize=2, maxPoolSize=2 2024-11-24T18:53:06,283 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/f2b92657890a:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T18:53:06,284 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732474416284 2024-11-24T18:53:06,284 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T18:53:06,284 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T18:53:06,284 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T18:53:06,284 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T18:53:06,284 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T18:53:06,284 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T18:53:06,284 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:06,285 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T18:53:06,285 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T18:53:06,285 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T18:53:06,285 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T18:53:06,285 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T18:53:06,285 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T18:53:06,285 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T18:53:06,286 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:06,286 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.large.0-1732474386286,5,FailOnTimeoutGroup] 2024-11-24T18:53:06,287 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.small.0-1732474386286,5,FailOnTimeoutGroup] 2024-11-24T18:53:06,286 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => 
'|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T18:53:06,287 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:06,287 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T18:53:06,287 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:06,287 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-24T18:53:06,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741831_1007 (size=1321) 2024-11-24T18:53:06,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32783 is added to blk_1073741831_1007 (size=1321) 2024-11-24T18:53:06,297 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T18:53:06,297 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98 2024-11-24T18:53:06,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741832_1008 (size=32) 2024-11-24T18:53:06,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32783 is added to blk_1073741832_1008 (size=32) 2024-11-24T18:53:06,306 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:53:06,308 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T18:53:06,309 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T18:53:06,309 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:06,310 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:53:06,310 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T18:53:06,312 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T18:53:06,312 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:06,312 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:53:06,312 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T18:53:06,314 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T18:53:06,314 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:06,314 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:53:06,315 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T18:53:06,316 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T18:53:06,316 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:06,317 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:53:06,317 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T18:53:06,318 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/hbase/meta/1588230740 2024-11-24T18:53:06,318 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/hbase/meta/1588230740 2024-11-24T18:53:06,320 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T18:53:06,320 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T18:53:06,320 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-24T18:53:06,321 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T18:53:06,324 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T18:53:06,324 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=866641, jitterRate=0.10199214518070221}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T18:53:06,325 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732474386306Initializing all the Stores at 1732474386307 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474386307Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474386307Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474386307Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474386308 (+1 ms)Cleaning up temporary data from old regions at 1732474386320 (+12 ms)Region opened successfully at 1732474386325 (+5 ms) 2024-11-24T18:53:06,325 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T18:53:06,325 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T18:53:06,325 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T18:53:06,325 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T18:53:06,325 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T18:53:06,326 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T18:53:06,326 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732474386325Disabling compacts and flushes for region at 1732474386325Disabling writes for close at 1732474386325Writing 
region close event to WAL at 1732474386326 (+1 ms)Closed at 1732474386326 2024-11-24T18:53:06,327 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T18:53:06,327 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T18:53:06,327 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T18:53:06,329 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T18:53:06,330 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T18:53:06,356 INFO [RS:0;f2b92657890a:45405 {}] regionserver.HRegionServer(746): ClusterId : 2ae8e624-f6fd-46d8-904d-a5c05fcfc37e 2024-11-24T18:53:06,356 DEBUG [RS:0;f2b92657890a:45405 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T18:53:06,369 DEBUG [RS:0;f2b92657890a:45405 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T18:53:06,369 DEBUG [RS:0;f2b92657890a:45405 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T18:53:06,381 DEBUG [RS:0;f2b92657890a:45405 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T18:53:06,382 DEBUG [RS:0;f2b92657890a:45405 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10e5d32b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f2b92657890a/172.17.0.2:0 2024-11-24T18:53:06,399 DEBUG [RS:0;f2b92657890a:45405 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;f2b92657890a:45405 2024-11-24T18:53:06,399 INFO [RS:0;f2b92657890a:45405 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T18:53:06,399 INFO [RS:0;f2b92657890a:45405 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T18:53:06,399 DEBUG [RS:0;f2b92657890a:45405 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-24T18:53:06,400 INFO [RS:0;f2b92657890a:45405 {}] regionserver.HRegionServer(2659): reportForDuty to master=f2b92657890a,41719,1732474385757 with port=45405, startcode=1732474385930 2024-11-24T18:53:06,400 DEBUG [RS:0;f2b92657890a:45405 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T18:53:06,402 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60629, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T18:53:06,403 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41719 {}] master.ServerManager(363): Checking decommissioned status of RegionServer f2b92657890a,45405,1732474385930 2024-11-24T18:53:06,403 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41719 {}] master.ServerManager(517): Registering regionserver=f2b92657890a,45405,1732474385930 2024-11-24T18:53:06,405 DEBUG [RS:0;f2b92657890a:45405 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98 2024-11-24T18:53:06,405 DEBUG [RS:0;f2b92657890a:45405 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37713 2024-11-24T18:53:06,405 DEBUG [RS:0;f2b92657890a:45405 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T18:53:06,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T18:53:06,412 DEBUG [RS:0;f2b92657890a:45405 {}] zookeeper.ZKUtil(111): regionserver:45405-0x1016e3130f80001, quorum=127.0.0.1:49552, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/f2b92657890a,45405,1732474385930 2024-11-24T18:53:06,413 WARN [RS:0;f2b92657890a:45405 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T18:53:06,413 INFO [RS:0;f2b92657890a:45405 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T18:53:06,413 DEBUG [RS:0;f2b92657890a:45405 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930 2024-11-24T18:53:06,413 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [f2b92657890a,45405,1732474385930] 2024-11-24T18:53:06,417 INFO [RS:0;f2b92657890a:45405 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T18:53:06,420 INFO [RS:0;f2b92657890a:45405 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T18:53:06,420 INFO [RS:0;f2b92657890a:45405 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T18:53:06,420 INFO [RS:0;f2b92657890a:45405 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-24T18:53:06,421 INFO [RS:0;f2b92657890a:45405 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T18:53:06,422 INFO [RS:0;f2b92657890a:45405 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T18:53:06,422 INFO [RS:0;f2b92657890a:45405 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:06,422 DEBUG [RS:0;f2b92657890a:45405 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:06,422 DEBUG [RS:0;f2b92657890a:45405 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:06,422 DEBUG [RS:0;f2b92657890a:45405 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:06,422 DEBUG [RS:0;f2b92657890a:45405 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:06,423 DEBUG [RS:0;f2b92657890a:45405 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:06,423 DEBUG [RS:0;f2b92657890a:45405 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/f2b92657890a:0, corePoolSize=2, maxPoolSize=2 2024-11-24T18:53:06,423 DEBUG [RS:0;f2b92657890a:45405 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:06,423 DEBUG [RS:0;f2b92657890a:45405 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:06,423 DEBUG [RS:0;f2b92657890a:45405 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:06,423 DEBUG [RS:0;f2b92657890a:45405 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:06,423 DEBUG [RS:0;f2b92657890a:45405 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:06,423 DEBUG [RS:0;f2b92657890a:45405 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:06,423 DEBUG [RS:0;f2b92657890a:45405 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/f2b92657890a:0, corePoolSize=3, maxPoolSize=3 2024-11-24T18:53:06,423 DEBUG [RS:0;f2b92657890a:45405 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0, corePoolSize=3, maxPoolSize=3 2024-11-24T18:53:06,424 INFO [RS:0;f2b92657890a:45405 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-24T18:53:06,424 INFO [RS:0;f2b92657890a:45405 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:06,424 INFO [RS:0;f2b92657890a:45405 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:06,424 INFO [RS:0;f2b92657890a:45405 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:06,424 INFO [RS:0;f2b92657890a:45405 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:06,424 INFO [RS:0;f2b92657890a:45405 {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,45405,1732474385930-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T18:53:06,440 INFO [RS:0;f2b92657890a:45405 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T18:53:06,441 INFO [RS:0;f2b92657890a:45405 {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,45405,1732474385930-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:06,441 INFO [RS:0;f2b92657890a:45405 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:06,441 INFO [RS:0;f2b92657890a:45405 {}] regionserver.Replication(171): f2b92657890a,45405,1732474385930 started 2024-11-24T18:53:06,476 INFO [RS:0;f2b92657890a:45405 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:06,476 INFO [RS:0;f2b92657890a:45405 {}] regionserver.HRegionServer(1482): Serving as f2b92657890a,45405,1732474385930, RpcServer on f2b92657890a/172.17.0.2:45405, sessionid=0x1016e3130f80001 2024-11-24T18:53:06,477 DEBUG [RS:0;f2b92657890a:45405 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T18:53:06,477 DEBUG [RS:0;f2b92657890a:45405 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager f2b92657890a,45405,1732474385930 2024-11-24T18:53:06,477 DEBUG [RS:0;f2b92657890a:45405 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f2b92657890a,45405,1732474385930' 2024-11-24T18:53:06,477 DEBUG [RS:0;f2b92657890a:45405 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T18:53:06,477 DEBUG [RS:0;f2b92657890a:45405 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T18:53:06,478 DEBUG [RS:0;f2b92657890a:45405 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T18:53:06,478 DEBUG [RS:0;f2b92657890a:45405 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T18:53:06,478 DEBUG [RS:0;f2b92657890a:45405 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager f2b92657890a,45405,1732474385930 2024-11-24T18:53:06,478 DEBUG [RS:0;f2b92657890a:45405 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f2b92657890a,45405,1732474385930' 2024-11-24T18:53:06,478 DEBUG [RS:0;f2b92657890a:45405 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T18:53:06,479 DEBUG 
[RS:0;f2b92657890a:45405 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T18:53:06,480 DEBUG [RS:0;f2b92657890a:45405 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T18:53:06,480 INFO [RS:0;f2b92657890a:45405 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T18:53:06,480 INFO [RS:0;f2b92657890a:45405 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T18:53:06,480 WARN [f2b92657890a:41719 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-24T18:53:06,583 INFO [RS:0;f2b92657890a:45405 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f2b92657890a%2C45405%2C1732474385930, suffix=, logDir=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930, archiveDir=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/oldWALs, maxLogs=32 2024-11-24T18:53:06,585 INFO [RS:0;f2b92657890a:45405 {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C45405%2C1732474385930.1732474386585 2024-11-24T18:53:06,594 INFO [RS:0;f2b92657890a:45405 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474386585 2024-11-24T18:53:06,596 DEBUG [RS:0;f2b92657890a:45405 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36183:36183),(127.0.0.1/127.0.0.1:35465:35465)] 2024-11-24T18:53:06,731 DEBUG [f2b92657890a:41719 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T18:53:06,732 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=f2b92657890a,45405,1732474385930 2024-11-24T18:53:06,735 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f2b92657890a,45405,1732474385930, state=OPENING 2024-11-24T18:53:06,749 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T18:53:06,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:06,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x1016e3130f80001, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:06,760 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T18:53:06,760 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=f2b92657890a,45405,1732474385930}] 2024-11-24T18:53:06,760 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for 
path /hbase/meta-region-server: CHANGED 2024-11-24T18:53:06,760 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T18:53:06,914 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T18:53:06,918 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36529, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T18:53:06,926 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T18:53:06,927 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T18:53:06,930 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f2b92657890a%2C45405%2C1732474385930.meta, suffix=.meta, logDir=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930, archiveDir=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/oldWALs, maxLogs=32 2024-11-24T18:53:06,931 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta 2024-11-24T18:53:06,939 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta 2024-11-24T18:53:06,940 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36183:36183),(127.0.0.1/127.0.0.1:35465:35465)] 2024-11-24T18:53:06,942 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T18:53:06,942 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T18:53:06,942 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T18:53:06,942 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-24T18:53:06,943 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T18:53:06,943 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:53:06,943 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T18:53:06,943 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T18:53:06,945 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T18:53:06,946 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T18:53:06,946 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:06,946 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:53:06,947 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T18:53:06,947 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T18:53:06,947 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:06,948 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:53:06,948 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T18:53:06,949 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T18:53:06,949 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:06,949 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:53:06,949 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T18:53:06,950 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T18:53:06,950 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:06,951 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-24T18:53:06,951 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T18:53:06,952 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/hbase/meta/1588230740 2024-11-24T18:53:06,953 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/hbase/meta/1588230740 2024-11-24T18:53:06,954 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T18:53:06,954 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T18:53:06,955 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T18:53:06,956 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T18:53:06,957 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=691277, jitterRate=-0.12099680304527283}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T18:53:06,957 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T18:53:06,958 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732474386943Writing region info on filesystem at 1732474386943Initializing all the Stores at 1732474386944 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474386944Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474386945 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474386945Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474386945Cleaning up temporary data from old regions at 1732474386955 (+10 ms)Running coprocessor post-open hooks at 1732474386957 (+2 ms)Region opened successfully at 1732474386958 (+1 ms) 2024-11-24T18:53:06,959 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732474386914 2024-11-24T18:53:06,962 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T18:53:06,962 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T18:53:06,963 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=f2b92657890a,45405,1732474385930 2024-11-24T18:53:06,964 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f2b92657890a,45405,1732474385930, state=OPEN 2024-11-24T18:53:06,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T18:53:06,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x1016e3130f80001, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T18:53:06,995 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=f2b92657890a,45405,1732474385930 2024-11-24T18:53:06,995 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T18:53:06,995 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T18:53:07,001 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T18:53:07,001 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=f2b92657890a,45405,1732474385930 in 235 msec 2024-11-24T18:53:07,006 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T18:53:07,006 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 674 msec 2024-11-24T18:53:07,007 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T18:53:07,007 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T18:53:07,009 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T18:53:07,009 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f2b92657890a,45405,1732474385930, seqNum=-1] 2024-11-24T18:53:07,010 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T18:53:07,011 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47839, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T18:53:07,017 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 736 msec 2024-11-24T18:53:07,017 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732474387017, completionTime=-1 2024-11-24T18:53:07,017 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T18:53:07,017 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T18:53:07,019 INFO [master/f2b92657890a:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T18:53:07,019 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732474447019 2024-11-24T18:53:07,019 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732474507019 2024-11-24T18:53:07,019 INFO [master/f2b92657890a:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-24T18:53:07,020 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,41719,1732474385757-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:07,020 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,41719,1732474385757-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:07,020 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,41719,1732474385757-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:07,020 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-f2b92657890a:41719, period=300000, unit=MILLISECONDS is enabled. 
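InitMetaProcedure(114) above announces that it will create the built-in 'default' and 'hbase' namespaces. A hedged sketch, using the standard Admin API, of verifying from a client that both namespaces exist once the master is up; the class and variable names here are illustrative, not taken from the test.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class NamespaceCheckSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // After InitMetaProcedure completes, both built-in namespaces should be listed.
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println("namespace: " + ns.getName());
          }
        }
      }
    }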
2024-11-24T18:53:07,020 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:07,020 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:07,022 DEBUG [master/f2b92657890a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T18:53:07,024 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.033sec 2024-11-24T18:53:07,024 INFO [master/f2b92657890a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T18:53:07,024 INFO [master/f2b92657890a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T18:53:07,024 INFO [master/f2b92657890a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T18:53:07,024 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T18:53:07,024 INFO [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T18:53:07,024 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,41719,1732474385757-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T18:53:07,024 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,41719,1732474385757-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T18:53:07,027 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T18:53:07,027 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T18:53:07,027 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,41719,1732474385757-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
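HMaster(1239) above reports that master initialization completed in about one second, after the master finished waiting for the single expected region server. A small sketch, assuming a plain client connection, of confirming the active master and the live region server count through the public ClusterMetrics API; it is an illustration, not the mechanism the test itself uses.

    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterStatusSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          System.out.println("active master: " + metrics.getMasterName());
          System.out.println("live regionservers: " + metrics.getLiveServerMetrics().size());
        }
      }
    }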
2024-11-24T18:53:07,066 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21e18edd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T18:53:07,067 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request f2b92657890a,41719,-1 for getting cluster id 2024-11-24T18:53:07,067 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T18:53:07,068 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2ae8e624-f6fd-46d8-904d-a5c05fcfc37e' 2024-11-24T18:53:07,069 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T18:53:07,069 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2ae8e624-f6fd-46d8-904d-a5c05fcfc37e" 2024-11-24T18:53:07,069 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@204512a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T18:53:07,069 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f2b92657890a,41719,-1] 2024-11-24T18:53:07,070 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T18:53:07,070 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:53:07,071 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58366, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T18:53:07,072 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@344f4078, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T18:53:07,073 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T18:53:07,073 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f2b92657890a,45405,1732474385930, seqNum=-1] 2024-11-24T18:53:07,074 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T18:53:07,075 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60880, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T18:53:07,077 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=f2b92657890a,41719,1732474385757 2024-11-24T18:53:07,077 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:53:07,080 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T18:53:07,094 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/f2b92657890a:0 server-side Connection retries=45 2024-11-24T18:53:07,094 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:53:07,094 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T18:53:07,094 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T18:53:07,094 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:53:07,094 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T18:53:07,094 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T18:53:07,095 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T18:53:07,095 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43977 2024-11-24T18:53:07,097 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43977 connecting to ZooKeeper ensemble=127.0.0.1:49552 2024-11-24T18:53:07,097 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:53:07,099 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:53:07,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:439770x0, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T18:53:07,118 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:43977-0x1016e3130f80002, quorum=127.0.0.1:49552, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-24T18:53:07,118 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43977-0x1016e3130f80002 connected 2024-11-24T18:53:07,118 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-24T18:53:07,119 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T18:53:07,120 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
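The entries above show the test opening a new client connection (fetching the cluster id and the hbase:meta location from the connection registry) and then calling the master with set balanceSwitch=false before bringing up a second region server. A hedged client-side equivalent; Admin.balancerSwitch exists in recent HBase releases, but whether the test uses exactly this call rather than a testing-utility shortcut is an assumption.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class BalancerOffSketch {
      public static void main(String[] args) throws Exception {
        // Building the connection triggers the cluster-id and meta-location
        // lookups logged by ClusterIdFetcher / ConnectionUtils above.
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Disable the balancer so regions stay put while datanodes are later killed.
          boolean previous = admin.balancerSwitch(false, true);
          System.out.println("balancer was previously " + (previous ? "on" : "off"));
        }
      }
    }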
2024-11-24T18:53:07,121 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:43977-0x1016e3130f80002, quorum=127.0.0.1:49552, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T18:53:07,123 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43977-0x1016e3130f80002, quorum=127.0.0.1:49552, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T18:53:07,124 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43977 2024-11-24T18:53:07,124 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43977 2024-11-24T18:53:07,124 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43977 2024-11-24T18:53:07,127 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43977 2024-11-24T18:53:07,127 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43977 2024-11-24T18:53:07,129 INFO [RS:1;f2b92657890a:43977 {}] regionserver.HRegionServer(746): ClusterId : 2ae8e624-f6fd-46d8-904d-a5c05fcfc37e 2024-11-24T18:53:07,129 DEBUG [RS:1;f2b92657890a:43977 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T18:53:07,139 DEBUG [RS:1;f2b92657890a:43977 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T18:53:07,139 DEBUG [RS:1;f2b92657890a:43977 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T18:53:07,150 DEBUG [RS:1;f2b92657890a:43977 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T18:53:07,151 DEBUG [RS:1;f2b92657890a:43977 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d93bcca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f2b92657890a/172.17.0.2:0 2024-11-24T18:53:07,165 DEBUG [RS:1;f2b92657890a:43977 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;f2b92657890a:43977 2024-11-24T18:53:07,165 INFO [RS:1;f2b92657890a:43977 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T18:53:07,165 INFO [RS:1;f2b92657890a:43977 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T18:53:07,165 DEBUG [RS:1;f2b92657890a:43977 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-24T18:53:07,166 INFO [RS:1;f2b92657890a:43977 {}] regionserver.HRegionServer(2659): reportForDuty to master=f2b92657890a,41719,1732474385757 with port=43977, startcode=1732474387094 2024-11-24T18:53:07,166 DEBUG [RS:1;f2b92657890a:43977 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T18:53:07,168 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34757, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T18:53:07,168 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41719 {}] master.ServerManager(363): Checking decommissioned status of RegionServer f2b92657890a,43977,1732474387094 2024-11-24T18:53:07,168 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41719 {}] master.ServerManager(517): Registering regionserver=f2b92657890a,43977,1732474387094 2024-11-24T18:53:07,170 DEBUG [RS:1;f2b92657890a:43977 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98 2024-11-24T18:53:07,170 DEBUG [RS:1;f2b92657890a:43977 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37713 2024-11-24T18:53:07,170 DEBUG [RS:1;f2b92657890a:43977 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T18:53:07,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T18:53:07,181 DEBUG [RS:1;f2b92657890a:43977 {}] zookeeper.ZKUtil(111): regionserver:43977-0x1016e3130f80002, quorum=127.0.0.1:49552, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/f2b92657890a,43977,1732474387094 2024-11-24T18:53:07,181 WARN [RS:1;f2b92657890a:43977 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T18:53:07,181 INFO [RS:1;f2b92657890a:43977 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T18:53:07,181 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [f2b92657890a,43977,1732474387094] 2024-11-24T18:53:07,181 DEBUG [RS:1;f2b92657890a:43977 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094 2024-11-24T18:53:07,185 INFO [RS:1;f2b92657890a:43977 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T18:53:07,187 INFO [RS:1;f2b92657890a:43977 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T18:53:07,187 INFO [RS:1;f2b92657890a:43977 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T18:53:07,188 INFO [RS:1;f2b92657890a:43977 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
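PressureAwareCompactionThroughputController(131) on the newly started region server reports a 50 to 100 MB/s compaction throughput band with a 60000 ms tuning period. A sketch of the Configuration keys those bounds usually come from; the key names match current HBase source but are offered here as assumptions rather than as settings this test touched.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputSketch {
      public static Configuration conf() {
        Configuration conf = HBaseConfiguration.create();
        // Upper and lower bounds printed by the throughput controller above.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        // Period (ms) between throughput re-tuning passes.
        conf.setInt("hbase.hstore.compaction.throughput.tune.period", 60 * 1000);
        return conf;
      }
    }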
2024-11-24T18:53:07,188 INFO [RS:1;f2b92657890a:43977 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T18:53:07,189 INFO [RS:1;f2b92657890a:43977 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T18:53:07,189 INFO [RS:1;f2b92657890a:43977 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:07,189 DEBUG [RS:1;f2b92657890a:43977 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:07,189 DEBUG [RS:1;f2b92657890a:43977 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:07,189 DEBUG [RS:1;f2b92657890a:43977 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:07,189 DEBUG [RS:1;f2b92657890a:43977 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:07,189 DEBUG [RS:1;f2b92657890a:43977 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:07,189 DEBUG [RS:1;f2b92657890a:43977 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/f2b92657890a:0, corePoolSize=2, maxPoolSize=2 2024-11-24T18:53:07,189 DEBUG [RS:1;f2b92657890a:43977 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:07,189 DEBUG [RS:1;f2b92657890a:43977 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:07,190 DEBUG [RS:1;f2b92657890a:43977 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:07,190 DEBUG [RS:1;f2b92657890a:43977 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:07,190 DEBUG [RS:1;f2b92657890a:43977 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:07,190 DEBUG [RS:1;f2b92657890a:43977 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:07,190 DEBUG [RS:1;f2b92657890a:43977 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/f2b92657890a:0, corePoolSize=3, maxPoolSize=3 2024-11-24T18:53:07,190 DEBUG [RS:1;f2b92657890a:43977 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0, corePoolSize=3, maxPoolSize=3 2024-11-24T18:53:07,190 INFO [RS:1;f2b92657890a:43977 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-24T18:53:07,190 INFO [RS:1;f2b92657890a:43977 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:07,191 INFO [RS:1;f2b92657890a:43977 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:07,191 INFO [RS:1;f2b92657890a:43977 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:07,191 INFO [RS:1;f2b92657890a:43977 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:07,191 INFO [RS:1;f2b92657890a:43977 {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,43977,1732474387094-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T18:53:07,205 INFO [RS:1;f2b92657890a:43977 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T18:53:07,205 INFO [RS:1;f2b92657890a:43977 {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,43977,1732474387094-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:07,205 INFO [RS:1;f2b92657890a:43977 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:07,205 INFO [RS:1;f2b92657890a:43977 {}] regionserver.Replication(171): f2b92657890a,43977,1732474387094 started 2024-11-24T18:53:07,218 INFO [RS:1;f2b92657890a:43977 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:07,218 INFO [RS:1;f2b92657890a:43977 {}] regionserver.HRegionServer(1482): Serving as f2b92657890a,43977,1732474387094, RpcServer on f2b92657890a/172.17.0.2:43977, sessionid=0x1016e3130f80002 2024-11-24T18:53:07,218 DEBUG [RS:1;f2b92657890a:43977 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T18:53:07,218 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;f2b92657890a:43977,5,FailOnTimeoutGroup] 2024-11-24T18:53:07,218 DEBUG [RS:1;f2b92657890a:43977 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager f2b92657890a,43977,1732474387094 2024-11-24T18:53:07,218 DEBUG [RS:1;f2b92657890a:43977 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f2b92657890a,43977,1732474387094' 2024-11-24T18:53:07,218 DEBUG [RS:1;f2b92657890a:43977 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T18:53:07,219 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-24T18:53:07,219 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T18:53:07,219 DEBUG [RS:1;f2b92657890a:43977 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T18:53:07,220 DEBUG [RS:1;f2b92657890a:43977 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T18:53:07,220 DEBUG [RS:1;f2b92657890a:43977 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T18:53:07,220 DEBUG [RS:1;f2b92657890a:43977 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
f2b92657890a,43977,1732474387094 2024-11-24T18:53:07,220 DEBUG [RS:1;f2b92657890a:43977 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f2b92657890a,43977,1732474387094' 2024-11-24T18:53:07,220 DEBUG [RS:1;f2b92657890a:43977 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T18:53:07,220 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is f2b92657890a,41719,1732474385757 2024-11-24T18:53:07,220 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1a70d4bd 2024-11-24T18:53:07,220 DEBUG [RS:1;f2b92657890a:43977 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T18:53:07,221 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T18:53:07,221 DEBUG [RS:1;f2b92657890a:43977 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T18:53:07,221 INFO [RS:1;f2b92657890a:43977 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T18:53:07,221 INFO [RS:1;f2b92657890a:43977 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T18:53:07,222 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58374, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T18:53:07,223 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41719 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-24T18:53:07,223 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41719 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
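TableDescriptorChecker(321) warns above that MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes) are far below sane production values; log-rolling tests shrink them deliberately so flushes and splits happen quickly. A hedged sketch of how such values are typically forced through Configuration before the table is created; the exact mechanism this test uses is an assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class TinyRegionSettingsSketch {
      public static Configuration conf() {
        Configuration conf = HBaseConfiguration.create();
        // Values that trigger the TableDescriptorChecker warnings above.
        conf.setLong("hbase.hregion.max.filesize", 786432L);       // ~768 KB, forces early splits
        conf.setLong("hbase.hregion.memstore.flush.size", 8192L);  // 8 KB, forces frequent flushes
        // With hbase.table.sanity.checks relaxed, the checker warns instead of rejecting the table.
        return conf;
      }
    }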
2024-11-24T18:53:07,223 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41719 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T18:53:07,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41719 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-24T18:53:07,226 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T18:53:07,226 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:07,226 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41719 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-24T18:53:07,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41719 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T18:53:07,227 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T18:53:07,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741835_1011 (size=393) 2024-11-24T18:53:07,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32783 is added to blk_1073741835_1011 (size=393) 2024-11-24T18:53:07,237 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => cd688812c78d2df61acf002dfcc44a35, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98 2024-11-24T18:53:07,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32783 is added to blk_1073741836_1012 (size=76) 2024-11-24T18:53:07,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741836_1012 (size=76) 2024-11-24T18:53:07,245 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:53:07,245 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing cd688812c78d2df61acf002dfcc44a35, disabling compactions & flushes 2024-11-24T18:53:07,246 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35. 2024-11-24T18:53:07,246 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35. 2024-11-24T18:53:07,246 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35. after waiting 0 ms 2024-11-24T18:53:07,246 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35. 2024-11-24T18:53:07,246 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35. 2024-11-24T18:53:07,246 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for cd688812c78d2df61acf002dfcc44a35: Waiting for close lock at 1732474387245Disabling compacts and flushes for region at 1732474387245Disabling writes for close at 1732474387246 (+1 ms)Writing region close event to WAL at 1732474387246Closed at 1732474387246 2024-11-24T18:53:07,248 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T18:53:07,248 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732474387248"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732474387248"}]},"ts":"1732474387248"} 2024-11-24T18:53:07,252 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
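HMaster$4(2454) above shows the client creating 'TestLogRolling-testLogRollOnDatanodeDeath' with a single 'info' family (one version, ROW bloom filter, no compression, 64 KB blocks), after which CreateTableProcedure writes the region to the filesystem and adds it to hbase:meta. A minimal client-side equivalent using the public builder API; it mirrors the descriptor printed in the log but is a sketch, not code lifted from the test source.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(TableDescriptorBuilder.newBuilder(name)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)                  // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
              .setBlocksize(64 * 1024)            // BLOCKSIZE => 64 KB
              .build())
            .build());
        }
      }
    }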
2024-11-24T18:53:07,253 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T18:53:07,254 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732474387253"}]},"ts":"1732474387253"} 2024-11-24T18:53:07,256 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-24T18:53:07,256 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=cd688812c78d2df61acf002dfcc44a35, ASSIGN}] 2024-11-24T18:53:07,258 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=cd688812c78d2df61acf002dfcc44a35, ASSIGN 2024-11-24T18:53:07,259 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=cd688812c78d2df61acf002dfcc44a35, ASSIGN; state=OFFLINE, location=f2b92657890a,45405,1732474385930; forceNewPlan=false, retain=false 2024-11-24T18:53:07,325 INFO [RS:1;f2b92657890a:43977 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f2b92657890a%2C43977%2C1732474387094, suffix=, logDir=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094, archiveDir=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/oldWALs, maxLogs=32 2024-11-24T18:53:07,326 INFO [RS:1;f2b92657890a:43977 {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C43977%2C1732474387094.1732474387326 2024-11-24T18:53:07,336 INFO [RS:1;f2b92657890a:43977 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 2024-11-24T18:53:07,339 DEBUG [RS:1;f2b92657890a:43977 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36183:36183),(127.0.0.1/127.0.0.1:35465:35465)] 2024-11-24T18:53:07,410 INFO [f2b92657890a:41719 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
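AbstractFSWAL(613) above prints the WAL configuration for the new region server: blocksize 256 MB, rollsize 128 MB (blocksize times the roll multiplier), maxLogs 32. A sketch of the Configuration keys that usually drive those numbers; the key names reflect current HBase defaults and are assumptions with respect to this particular run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollSettingsSketch {
      public static Configuration conf() {
        Configuration conf = HBaseConfiguration.create();
        // WAL block size; the WAL is rolled once it reaches blocksize * multiplier.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f); // -> rollsize = 128 MB
        // Maximum number of WAL files kept before flushes are forced to release older ones.
        conf.setInt("hbase.regionserver.maxlogs", 32);
        return conf;
      }
    }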
2024-11-24T18:53:07,411 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=cd688812c78d2df61acf002dfcc44a35, regionState=OPENING, regionLocation=f2b92657890a,45405,1732474385930 2024-11-24T18:53:07,416 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=cd688812c78d2df61acf002dfcc44a35, ASSIGN because future has completed 2024-11-24T18:53:07,418 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure cd688812c78d2df61acf002dfcc44a35, server=f2b92657890a,45405,1732474385930}] 2024-11-24T18:53:07,580 INFO [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35. 2024-11-24T18:53:07,580 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => cd688812c78d2df61acf002dfcc44a35, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35.', STARTKEY => '', ENDKEY => ''} 2024-11-24T18:53:07,581 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath cd688812c78d2df61acf002dfcc44a35 2024-11-24T18:53:07,581 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:53:07,582 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for cd688812c78d2df61acf002dfcc44a35 2024-11-24T18:53:07,582 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for cd688812c78d2df61acf002dfcc44a35 2024-11-24T18:53:07,585 INFO [StoreOpener-cd688812c78d2df61acf002dfcc44a35-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region cd688812c78d2df61acf002dfcc44a35 2024-11-24T18:53:07,587 INFO [StoreOpener-cd688812c78d2df61acf002dfcc44a35-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cd688812c78d2df61acf002dfcc44a35 columnFamilyName info 2024-11-24T18:53:07,587 DEBUG [StoreOpener-cd688812c78d2df61acf002dfcc44a35-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:07,588 INFO [StoreOpener-cd688812c78d2df61acf002dfcc44a35-1 {}] regionserver.HStore(327): Store=cd688812c78d2df61acf002dfcc44a35/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T18:53:07,588 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for cd688812c78d2df61acf002dfcc44a35 2024-11-24T18:53:07,590 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35 2024-11-24T18:53:07,590 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35 2024-11-24T18:53:07,591 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for cd688812c78d2df61acf002dfcc44a35 2024-11-24T18:53:07,591 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for cd688812c78d2df61acf002dfcc44a35 2024-11-24T18:53:07,594 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for cd688812c78d2df61acf002dfcc44a35 2024-11-24T18:53:07,598 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T18:53:07,598 INFO [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened cd688812c78d2df61acf002dfcc44a35; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=819583, jitterRate=0.04215380549430847}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T18:53:07,599 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for cd688812c78d2df61acf002dfcc44a35 2024-11-24T18:53:07,600 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for cd688812c78d2df61acf002dfcc44a35: Running coprocessor pre-open hook at 1732474387582Writing region info on filesystem at 1732474387582Initializing all the Stores at 1732474387584 (+2 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474387584Cleaning up temporary data from old regions at 1732474387591 (+7 ms)Running coprocessor post-open hooks at 1732474387599 (+8 ms)Region opened successfully at 1732474387599 2024-11-24T18:53:07,601 INFO [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35., pid=6, masterSystemTime=1732474387575 2024-11-24T18:53:07,603 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35. 2024-11-24T18:53:07,603 INFO [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35. 2024-11-24T18:53:07,604 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=cd688812c78d2df61acf002dfcc44a35, regionState=OPEN, openSeqNum=2, regionLocation=f2b92657890a,45405,1732474385930 2024-11-24T18:53:07,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure cd688812c78d2df61acf002dfcc44a35, server=f2b92657890a,45405,1732474385930 because future has completed 2024-11-24T18:53:07,610 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T18:53:07,611 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure cd688812c78d2df61acf002dfcc44a35, server=f2b92657890a,45405,1732474385930 in 190 msec 2024-11-24T18:53:07,613 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T18:53:07,613 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=cd688812c78d2df61acf002dfcc44a35, ASSIGN in 355 msec 2024-11-24T18:53:07,614 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T18:53:07,614 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732474387614"}]},"ts":"1732474387614"} 2024-11-24T18:53:07,617 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-24T18:53:07,618 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T18:53:07,620 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 395 msec 2024-11-24T18:53:07,789 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T18:53:07,793 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:53:07,807 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:53:07,810 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:53:07,810 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:53:08,328 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T18:53:08,328 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-24T18:53:08,330 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-24T18:53:08,330 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-24T18:53:08,331 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T18:53:08,331 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-24T18:53:08,332 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-24T18:53:08,332 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-24T18:53:12,418 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-24T18:53:13,089 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T18:53:13,093 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:53:13,121 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:53:13,126 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:53:13,126 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:53:17,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41719 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T18:53:17,273 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-24T18:53:17,273 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-24T18:53:17,282 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-24T18:53:17,282 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35. 2024-11-24T18:53:17,298 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:53:17,301 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T18:53:17,301 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T18:53:17,302 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T18:53:17,302 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T18:53:17,302 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30008f24{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/hadoop.log.dir/,AVAILABLE} 2024-11-24T18:53:17,302 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ab5b96c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T18:53:17,396 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3fd17220{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/java.io.tmpdir/jetty-localhost-36211-hadoop-hdfs-3_4_1-tests_jar-_-any-6639305715672924208/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:53:17,397 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4c68f920{HTTP/1.1, (http/1.1)}{localhost:36211} 2024-11-24T18:53:17,397 INFO [Time-limited test {}] server.Server(415): Started @126749ms 2024-11-24T18:53:17,398 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T18:53:17,425 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:53:17,429 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T18:53:17,430 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T18:53:17,430 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T18:53:17,430 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T18:53:17,430 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3f85c2b2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/hadoop.log.dir/,AVAILABLE} 2024-11-24T18:53:17,431 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64b89ed5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T18:53:17,523 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6d82aaea{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/java.io.tmpdir/jetty-localhost-38891-hadoop-hdfs-3_4_1-tests_jar-_-any-5530535418689052597/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:53:17,523 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3904e150{HTTP/1.1, (http/1.1)}{localhost:38891} 2024-11-24T18:53:17,523 INFO [Time-limited test {}] server.Server(415): Started @126875ms 2024-11-24T18:53:17,524 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T18:53:17,558 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:53:17,562 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T18:53:17,563 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T18:53:17,563 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T18:53:17,563 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T18:53:17,564 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6b21f544{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/hadoop.log.dir/,AVAILABLE} 2024-11-24T18:53:17,565 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47432b7b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T18:53:17,666 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@20faceaa{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/java.io.tmpdir/jetty-localhost-35795-hadoop-hdfs-3_4_1-tests_jar-_-any-6413547120275653157/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:53:17,666 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3e1ad43e{HTTP/1.1, (http/1.1)}{localhost:35795} 2024-11-24T18:53:17,667 INFO [Time-limited test {}] server.Server(415): Started @127019ms 2024-11-24T18:53:17,668 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T18:53:18,887 WARN [Thread-868 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data5/current/BP-829498211-172.17.0.2-1732474383219/current, will proceed with Du for space computation calculation, 2024-11-24T18:53:18,887 WARN [Thread-869 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data6/current/BP-829498211-172.17.0.2-1732474383219/current, will proceed with Du for space computation calculation, 2024-11-24T18:53:18,905 WARN [Thread-809 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T18:53:18,907 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd4ce75371897481d with lease ID 0xddbbb5e26caa0122: Processing first storage report for DS-ca721525-2760-4643-ab64-d1434814772c from datanode DatanodeRegistration(127.0.0.1:37367, datanodeUuid=b8ca2119-ac3b-47a1-a2c3-1d0f556c0334, infoPort=45895, infoSecurePort=0, ipcPort=42977, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219) 2024-11-24T18:53:18,907 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd4ce75371897481d with lease ID 0xddbbb5e26caa0122: from storage DS-ca721525-2760-4643-ab64-d1434814772c node DatanodeRegistration(127.0.0.1:37367, datanodeUuid=b8ca2119-ac3b-47a1-a2c3-1d0f556c0334, infoPort=45895, infoSecurePort=0, ipcPort=42977, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:53:18,907 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd4ce75371897481d with lease ID 0xddbbb5e26caa0122: Processing first storage report for DS-faab04d3-903b-4220-88dd-54294f336ede from datanode DatanodeRegistration(127.0.0.1:37367, datanodeUuid=b8ca2119-ac3b-47a1-a2c3-1d0f556c0334, infoPort=45895, infoSecurePort=0, ipcPort=42977, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219) 2024-11-24T18:53:18,907 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd4ce75371897481d with lease ID 0xddbbb5e26caa0122: from storage DS-faab04d3-903b-4220-88dd-54294f336ede node DatanodeRegistration(127.0.0.1:37367, datanodeUuid=b8ca2119-ac3b-47a1-a2c3-1d0f556c0334, infoPort=45895, infoSecurePort=0, ipcPort=42977, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:53:18,936 WARN [Thread-879 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data7/current/BP-829498211-172.17.0.2-1732474383219/current, will proceed with Du for space computation calculation, 2024-11-24T18:53:18,936 WARN [Thread-880 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data8/current/BP-829498211-172.17.0.2-1732474383219/current, will proceed with Du for space computation calculation, 2024-11-24T18:53:18,958 WARN [Thread-831 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T18:53:18,962 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7e2c403155ffaaf2 with lease ID 0xddbbb5e26caa0123: Processing first storage report for DS-feb9aff5-069b-483f-95c1-d8393c723b1f from datanode DatanodeRegistration(127.0.0.1:45993, datanodeUuid=75eae523-6690-46a3-8eee-6948bb1165b7, infoPort=32823, infoSecurePort=0, ipcPort=36131, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219) 2024-11-24T18:53:18,962 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7e2c403155ffaaf2 with lease ID 0xddbbb5e26caa0123: from storage DS-feb9aff5-069b-483f-95c1-d8393c723b1f node DatanodeRegistration(127.0.0.1:45993, datanodeUuid=75eae523-6690-46a3-8eee-6948bb1165b7, infoPort=32823, infoSecurePort=0, ipcPort=36131, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:53:18,962 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7e2c403155ffaaf2 with lease ID 0xddbbb5e26caa0123: Processing first storage report for DS-b90da86a-3bec-4c79-96d8-e2a760b63474 from datanode DatanodeRegistration(127.0.0.1:45993, datanodeUuid=75eae523-6690-46a3-8eee-6948bb1165b7, infoPort=32823, infoSecurePort=0, ipcPort=36131, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219) 2024-11-24T18:53:18,962 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7e2c403155ffaaf2 with lease ID 0xddbbb5e26caa0123: from storage DS-b90da86a-3bec-4c79-96d8-e2a760b63474 node DatanodeRegistration(127.0.0.1:45993, datanodeUuid=75eae523-6690-46a3-8eee-6948bb1165b7, infoPort=32823, infoSecurePort=0, ipcPort=36131, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:53:18,991 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data9/current/BP-829498211-172.17.0.2-1732474383219/current, will proceed with Du for space computation calculation, 2024-11-24T18:53:18,991 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data10/current/BP-829498211-172.17.0.2-1732474383219/current, will proceed with Du for space computation calculation, 2024-11-24T18:53:19,008 WARN [Thread-853 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T18:53:19,011 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1893ddc6c18e5faf with lease ID 0xddbbb5e26caa0124: Processing first storage report for DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954 from datanode DatanodeRegistration(127.0.0.1:43133, datanodeUuid=61f4bc96-d307-4904-83c2-d15200586874, infoPort=46121, infoSecurePort=0, ipcPort=37863, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219) 2024-11-24T18:53:19,011 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1893ddc6c18e5faf with lease ID 0xddbbb5e26caa0124: from storage DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954 node DatanodeRegistration(127.0.0.1:43133, datanodeUuid=61f4bc96-d307-4904-83c2-d15200586874, infoPort=46121, infoSecurePort=0, ipcPort=37863, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:53:19,011 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1893ddc6c18e5faf with lease ID 0xddbbb5e26caa0124: Processing first storage report for DS-3de8af57-fa8a-4dc1-8290-7fc9d1fbbc63 from datanode DatanodeRegistration(127.0.0.1:43133, datanodeUuid=61f4bc96-d307-4904-83c2-d15200586874, infoPort=46121, infoSecurePort=0, ipcPort=37863, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219) 2024-11-24T18:53:19,011 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1893ddc6c18e5faf with lease ID 0xddbbb5e26caa0124: from storage DS-3de8af57-fa8a-4dc1-8290-7fc9d1fbbc63 node DatanodeRegistration(127.0.0.1:43133, datanodeUuid=61f4bc96-d307-4904-83c2-d15200586874, infoPort=46121, infoSecurePort=0, ipcPort=37863, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:53:19,100 WARN [ResponseProcessor for block BP-829498211-172.17.0.2-1732474383219:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-829498211-172.17.0.2-1732474383219:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:19,100 WARN [ResponseProcessor for block BP-829498211-172.17.0.2-1732474383219:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-829498211-172.17.0.2-1732474383219:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-829498211-172.17.0.2-1732474383219:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T18:53:19,100 WARN [ResponseProcessor for block BP-829498211-172.17.0.2-1732474383219:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-829498211-172.17.0.2-1732474383219:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-829498211-172.17.0.2-1732474383219:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:19,101 WARN [ResponseProcessor for block BP-829498211-172.17.0.2-1732474383219:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-829498211-172.17.0.2-1732474383219:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-829498211-172.17.0.2-1732474383219:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:19,102 WARN [DataStreamer for file /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 block BP-829498211-172.17.0.2-1732474383219:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK], DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK]) is bad. 2024-11-24T18:53:19,102 WARN [DataStreamer for file /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/WALs/f2b92657890a,41719,1732474385757/f2b92657890a%2C41719%2C1732474385757.1732474386067 block BP-829498211-172.17.0.2-1732474383219:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK], DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK]) is bad. 2024-11-24T18:53:19,102 WARN [DataStreamer for file /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474386585 block BP-829498211-172.17.0.2-1732474383219:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK], DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK]) is bad. 
2024-11-24T18:53:19,102 WARN [DataStreamer for file /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta block BP-829498211-172.17.0.2-1732474383219:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK], DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK]) is bad. 2024-11-24T18:53:19,101 WARN [PacketResponder: BP-829498211-172.17.0.2-1732474383219:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:32973] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:19,101 WARN [PacketResponder: BP-829498211-172.17.0.2-1732474383219:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:32973] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:19,101 WARN [PacketResponder: BP-829498211-172.17.0.2-1732474383219:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:32973] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:19,103 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_582594852_22 at /127.0.0.1:41196 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:32783:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41196 dst: /127.0.0.1:32783 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:19,103 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:41150 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:32783:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41150 dst: /127.0.0.1:32783 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:19,104 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:41136 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:32783:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41136 dst: /127.0.0.1:32783 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:19,104 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_333290757_22 at /127.0.0.1:41116 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:32783:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41116 dst: /127.0.0.1:32783 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:19,103 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_333290757_22 at /127.0.0.1:53344 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:32973:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53344 dst: /127.0.0.1:32973 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:19,104 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:53368 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:32973:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53368 dst: /127.0.0.1:32973 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:19,104 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_582594852_22 at /127.0.0.1:53402 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:32973:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53402 dst: /127.0.0.1:32973 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:19,104 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:53378 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:32973:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53378 dst: /127.0.0.1:32973 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:19,106 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@272348fe{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:53:19,106 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@10b53169{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T18:53:19,107 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T18:53:19,107 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4bb19ef9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T18:53:19,107 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32403ac6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/hadoop.log.dir/,STOPPED} 2024-11-24T18:53:19,108 WARN [BP-829498211-172.17.0.2-1732474383219 heartbeating to localhost/127.0.0.1:37713 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T18:53:19,108 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T18:53:19,108 WARN [BP-829498211-172.17.0.2-1732474383219 heartbeating to localhost/127.0.0.1:37713 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-829498211-172.17.0.2-1732474383219 (Datanode Uuid adeb25d7-9417-4898-a7a7-6d9e033f0ea2) service to localhost/127.0.0.1:37713 2024-11-24T18:53:19,108 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T18:53:19,109 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data3/current/BP-829498211-172.17.0.2-1732474383219 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:53:19,109 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data4/current/BP-829498211-172.17.0.2-1732474383219 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:53:19,109 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T18:53:19,110 WARN [DataStreamer for file /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474386585 block BP-829498211-172.17.0.2-1732474383219:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:19,110 WARN [DataStreamer for file /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta block BP-829498211-172.17.0.2-1732474383219:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:19,110 WARN [DataStreamer for file /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/WALs/f2b92657890a,41719,1732474385757/f2b92657890a%2C41719%2C1732474385757.1732474386067 block BP-829498211-172.17.0.2-1732474383219:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:19,112 WARN [DataStreamer for file /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 block BP-829498211-172.17.0.2-1732474383219:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:19,112 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5538b075{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:53:19,113 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3f87a993{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T18:53:19,113 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T18:53:19,113 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1bb5d847{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T18:53:19,113 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1bf32f74{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/hadoop.log.dir/,STOPPED} 2024-11-24T18:53:19,114 WARN [BP-829498211-172.17.0.2-1732474383219 heartbeating to localhost/127.0.0.1:37713 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T18:53:19,114 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T18:53:19,114 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T18:53:19,114 WARN [BP-829498211-172.17.0.2-1732474383219 heartbeating to localhost/127.0.0.1:37713 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-829498211-172.17.0.2-1732474383219 (Datanode Uuid a91e4357-e48c-4cf9-a1b5-74d0a339fe61) service to localhost/127.0.0.1:37713 2024-11-24T18:53:19,115 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data1/current/BP-829498211-172.17.0.2-1732474383219 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:53:19,115 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data2/current/BP-829498211-172.17.0.2-1732474383219 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:53:19,115 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T18:53:19,119 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35., hostname=f2b92657890a,45405,1732474385930, seqNum=2] 2024-11-24T18:53:19,120 ERROR [FSHLog-0-hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98-prefix:f2b92657890a,45405,1732474385930 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:19,120 WARN [FSHLog-0-hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98-prefix:f2b92657890a,45405,1732474385930 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:19,121 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f2b92657890a%2C45405%2C1732474385930:(num 1732474386585) roll requested 2024-11-24T18:53:19,121 INFO [regionserver/f2b92657890a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C45405%2C1732474385930.1732474399121 2024-11-24T18:53:19,123 WARN [Thread-901 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:19,124 WARN [Thread-901 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK], DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]) is bad. 2024-11-24T18:53:19,124 WARN [Thread-901 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741838_1018 2024-11-24T18:53:19,126 WARN [Thread-901 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK] 2024-11-24T18:53:19,131 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:19,131 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:19,131 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:19,131 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:19,131 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:19,131 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474386585 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474399121 2024-11-24T18:53:19,132 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:19,132 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:19,132 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46121:46121),(127.0.0.1/127.0.0.1:32823:32823)] 2024-11-24T18:53:19,132 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474386585 is not closed yet, will try archiving it next time 2024-11-24T18:53:19,133 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-24T18:53:19,133 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-24T18:53:19,133 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474386585 2024-11-24T18:53:19,135 WARN [IPC Server handler 4 on default port 37713 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474386585 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741833_1009 2024-11-24T18:53:19,138 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474386585 after 4ms 2024-11-24T18:53:19,191 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:20,042 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:21,133 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:21,134 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474399121 2024-11-24T18:53:21,135 WARN [ResponseProcessor for block BP-829498211-172.17.0.2-1732474383219:blk_1073741839_1019 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-829498211-172.17.0.2-1732474383219:blk_1073741839_1019 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T18:53:21,136 WARN [DataStreamer for file /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474399121 block BP-829498211-172.17.0.2-1732474383219:blk_1073741839_1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK], DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK]) is bad. 2024-11-24T18:53:21,137 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:40410 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:43133:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40410 dst: /127.0.0.1:43133 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T18:53:21,138 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:48798 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:45993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48798 dst: /127.0.0.1:45993 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:21,141 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@20faceaa{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:53:21,141 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3e1ad43e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T18:53:21,141 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T18:53:21,142 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47432b7b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T18:53:21,142 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6b21f544{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/hadoop.log.dir/,STOPPED} 2024-11-24T18:53:21,144 WARN [BP-829498211-172.17.0.2-1732474383219 heartbeating to localhost/127.0.0.1:37713 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T18:53:21,144 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T18:53:21,144 WARN [BP-829498211-172.17.0.2-1732474383219 heartbeating to localhost/127.0.0.1:37713 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-829498211-172.17.0.2-1732474383219 (Datanode Uuid 61f4bc96-d307-4904-83c2-d15200586874) service to localhost/127.0.0.1:37713 2024-11-24T18:53:21,144 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T18:53:21,145 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data9/current/BP-829498211-172.17.0.2-1732474383219 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:53:21,145 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data10/current/BP-829498211-172.17.0.2-1732474383219 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:53:21,145 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T18:53:21,192 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:22,043 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:23,133 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:23,134 WARN [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK]] 2024-11-24T18:53:23,134 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f2b92657890a%2C45405%2C1732474385930:(num 1732474399121) roll requested 2024-11-24T18:53:23,135 INFO [regionserver/f2b92657890a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C45405%2C1732474385930.1732474403135 2024-11-24T18:53:23,140 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474386585 after 4007ms 2024-11-24T18:53:23,146 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:23,147 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:23,147 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:23,147 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:23,147 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:23,147 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474399121 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474403135 2024-11-24T18:53:23,148 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32823:32823),(127.0.0.1/127.0.0.1:45895:45895)] 2024-11-24T18:53:23,148 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474386585 is not closed yet, will try archiving it next time 2024-11-24T18:53:23,148 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474399121 is not closed yet, will try archiving it next time 2024-11-24T18:53:23,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741839_1021 (size=2431) 2024-11-24T18:53:23,151 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-24T18:53:23,192 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] 
are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:23,551 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474386585 is not closed yet, will try archiving it next time 2024-11-24T18:53:24,043 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:24,981 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2879aff9[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45993, datanodeUuid=75eae523-6690-46a3-8eee-6948bb1165b7, infoPort=32823, infoSecurePort=0, ipcPort=36131, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219):Failed to transfer BP-829498211-172.17.0.2-1732474383219:blk_1073741839_1021 to 127.0.0.1:43133 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:25,149 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:25,156 WARN [ResponseProcessor for block BP-829498211-172.17.0.2-1732474383219:blk_1073741840_1022 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-829498211-172.17.0.2-1732474383219:blk_1073741840_1022 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:25,156 WARN [DataStreamer for file /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474403135 block BP-829498211-172.17.0.2-1732474383219:blk_1073741840_1022 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK], DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK]) is bad. 2024-11-24T18:53:25,157 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:43668 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:45993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43668 dst: /127.0.0.1:45993 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:25,158 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60502 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:37367:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60502 dst: /127.0.0.1:37367 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T18:53:25,161 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6d82aaea{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:53:25,162 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3904e150{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T18:53:25,162 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T18:53:25,162 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64b89ed5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T18:53:25,162 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3f85c2b2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/hadoop.log.dir/,STOPPED} 2024-11-24T18:53:25,164 WARN [BP-829498211-172.17.0.2-1732474383219 heartbeating to localhost/127.0.0.1:37713 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T18:53:25,164 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T18:53:25,164 WARN [BP-829498211-172.17.0.2-1732474383219 heartbeating to localhost/127.0.0.1:37713 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-829498211-172.17.0.2-1732474383219 (Datanode Uuid 75eae523-6690-46a3-8eee-6948bb1165b7) service to localhost/127.0.0.1:37713 2024-11-24T18:53:25,164 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T18:53:25,164 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data7/current/BP-829498211-172.17.0.2-1732474383219 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:53:25,165 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data8/current/BP-829498211-172.17.0.2-1732474383219 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:53:25,165 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T18:53:25,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45405 {}] regionserver.HRegion(8855): Flush requested on cd688812c78d2df61acf002dfcc44a35 2024-11-24T18:53:25,173 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing cd688812c78d2df61acf002dfcc44a35 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T18:53:25,191 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/.tmp/info/77e96bc6576249929f33d4381ed0d3f7 is 1080, key is row0002/info:/1732474401147/Put/seqid=0 2024-11-24T18:53:25,193 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:25,194 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1024 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32973 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:25,194 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60518 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741841_1024] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data6]'}, localName='127.0.0.1:37367', datanodeUuid='b8ca2119-ac3b-47a1-a2c3-1d0f556c0334', xmitsInProgress=0}:Exception transferring block BP-829498211-172.17.0.2-1732474383219:blk_1073741841_1024 to mirror 127.0.0.1:32973 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:25,194 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741841_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK], DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK]) is bad. 2024-11-24T18:53:25,194 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741841_1024 2024-11-24T18:53:25,194 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60518 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741841_1024] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T18:53:25,194 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60518 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741841_1024] {}] datanode.DataXceiver(331): 127.0.0.1:37367:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60518 dst: /127.0.0.1:37367 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:25,195 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK] 2024-11-24T18:53:25,197 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32783 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:25,197 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60528 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741842_1025] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data6]'}, localName='127.0.0.1:37367', datanodeUuid='b8ca2119-ac3b-47a1-a2c3-1d0f556c0334', xmitsInProgress=0}:Exception transferring block BP-829498211-172.17.0.2-1732474383219:blk_1073741842_1025 to mirror 127.0.0.1:32783 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:25,197 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK], DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]) is bad. 2024-11-24T18:53:25,197 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741842_1025 2024-11-24T18:53:25,197 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60528 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741842_1025] {}] datanode.BlockReceiver(316): Block 1073741842 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T18:53:25,198 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60528 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741842_1025] {}] datanode.DataXceiver(331): 127.0.0.1:37367:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60528 dst: /127.0.0.1:37367 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:25,198 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK] 2024-11-24T18:53:25,199 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:25,199 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK], DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK]) is bad. 2024-11-24T18:53:25,199 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741843_1026 2024-11-24T18:53:25,200 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK] 2024-11-24T18:53:25,202 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45993 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:25,202 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60534 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741844_1027] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data6]'}, localName='127.0.0.1:37367', datanodeUuid='b8ca2119-ac3b-47a1-a2c3-1d0f556c0334', xmitsInProgress=0}:Exception transferring block BP-829498211-172.17.0.2-1732474383219:blk_1073741844_1027 to mirror 127.0.0.1:45993 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:25,202 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK], DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK]) is bad. 2024-11-24T18:53:25,203 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741844_1027 2024-11-24T18:53:25,203 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60534 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741844_1027] {}] datanode.BlockReceiver(316): Block 1073741844 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T18:53:25,203 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60534 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741844_1027] {}] datanode.DataXceiver(331): 127.0.0.1:37367:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60534 dst: /127.0.0.1:37367 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:25,203 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK] 2024-11-24T18:53:25,204 WARN [IPC Server handler 1 on default port 37713 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T18:53:25,204 WARN [IPC Server handler 1 on default port 37713 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T18:53:25,204 WARN [IPC Server handler 1 on default port 37713 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T18:53:25,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37367 is added to blk_1073741845_1028 (size=10347) 2024-11-24T18:53:25,608 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/.tmp/info/77e96bc6576249929f33d4381ed0d3f7 2024-11-24T18:53:25,617 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/.tmp/info/77e96bc6576249929f33d4381ed0d3f7 as hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/77e96bc6576249929f33d4381ed0d3f7 2024-11-24T18:53:25,626 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/77e96bc6576249929f33d4381ed0d3f7, entries=5, sequenceid=11, filesize=10.1 K 2024-11-24T18:53:25,627 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for cd688812c78d2df61acf002dfcc44a35 in 454ms, sequenceid=11, compaction requested=false 2024-11-24T18:53:25,627 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for cd688812c78d2df61acf002dfcc44a35: 2024-11-24T18:53:25,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45405 {}] regionserver.HRegion(8855): Flush requested on cd688812c78d2df61acf002dfcc44a35 2024-11-24T18:53:25,796 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing cd688812c78d2df61acf002dfcc44a35 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-24T18:53:25,800 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/.tmp/info/3c6fdc1afd34466ba41bcc786a2a92cb is 1080, key is row0007/info:/1732474405174/Put/seqid=0 2024-11-24T18:53:25,803 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32973 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:25,802 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60550 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data6]'}, localName='127.0.0.1:37367', datanodeUuid='b8ca2119-ac3b-47a1-a2c3-1d0f556c0334', xmitsInProgress=0}:Exception transferring block BP-829498211-172.17.0.2-1732474383219:blk_1073741846_1029 to mirror 127.0.0.1:32973 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:25,803 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK], DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK]) is bad. 2024-11-24T18:53:25,803 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741846_1029 2024-11-24T18:53:25,803 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60550 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T18:53:25,803 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60550 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:37367:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60550 dst: /127.0.0.1:37367 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:25,804 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK] 2024-11-24T18:53:25,805 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:25,805 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK], DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK]) is bad. 2024-11-24T18:53:25,805 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741847_1030 2024-11-24T18:53:25,806 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK] 2024-11-24T18:53:25,807 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:25,808 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK], DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK]) is bad. 2024-11-24T18:53:25,808 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741848_1031 2024-11-24T18:53:25,808 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK] 2024-11-24T18:53:25,811 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32783 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:25,811 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60566 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data6]'}, localName='127.0.0.1:37367', datanodeUuid='b8ca2119-ac3b-47a1-a2c3-1d0f556c0334', xmitsInProgress=0}:Exception transferring block BP-829498211-172.17.0.2-1732474383219:blk_1073741849_1032 to mirror 127.0.0.1:32783 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:25,811 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK], DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]) is bad. 2024-11-24T18:53:25,811 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60566 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
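The BlockPlacementPolicyDefault(501) warnings above already name the loggers to turn up when replica placement keeps failing like this: org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology. Below is a minimal sketch of raising them to DEBUG programmatically with Log4j 2, the backend this run is configured with; editing the test's log4j2 properties file instead is equivalent. The class name LogLevelBump is illustrative only, not part of the test harness.

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

// Raise the two loggers named in the BlockPlacementPolicyDefault warning
// so the namenode explains which datanodes/storages it rejected and why.
public final class LogLevelBump {
    public static void main(String[] args) {
        Configurator.setLevel(
            "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy",
            Level.DEBUG);
        Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
    }
}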
2024-11-24T18:53:25,811 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741849_1032 2024-11-24T18:53:25,812 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60566 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:37367:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60566 dst: /127.0.0.1:37367 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:25,812 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK] 2024-11-24T18:53:25,813 WARN [IPC Server handler 1 on default port 37713 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T18:53:25,813 WARN [IPC Server handler 1 on default port 37713 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T18:53:25,813 WARN [IPC Server handler 1 on default port 37713 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T18:53:25,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37367 is added to blk_1073741850_1033 (size=12506) 2024-11-24T18:53:26,044 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:26,219 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/.tmp/info/3c6fdc1afd34466ba41bcc786a2a92cb 2024-11-24T18:53:26,226 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/.tmp/info/3c6fdc1afd34466ba41bcc786a2a92cb as hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/3c6fdc1afd34466ba41bcc786a2a92cb 2024-11-24T18:53:26,234 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/3c6fdc1afd34466ba41bcc786a2a92cb, entries=7, sequenceid=24, filesize=12.2 K 2024-11-24T18:53:26,235 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for cd688812c78d2df61acf002dfcc44a35 in 439ms, sequenceid=24, compaction requested=false 2024-11-24T18:53:26,236 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for cd688812c78d2df61acf002dfcc44a35: 2024-11-24T18:53:26,236 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-24T18:53:26,236 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T18:53:26,236 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/3c6fdc1afd34466ba41bcc786a2a92cb because midkey is the same as first or last row 2024-11-24T18:53:27,149 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:27,150 WARN [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK]] 2024-11-24T18:53:27,150 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f2b92657890a%2C45405%2C1732474385930:(num 1732474403135) roll requested 2024-11-24T18:53:27,151 INFO [regionserver/f2b92657890a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C45405%2C1732474385930.1732474407150 2024-11-24T18:53:27,154 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:27,154 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK], DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK]) is bad. 2024-11-24T18:53:27,154 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741851_1034 2024-11-24T18:53:27,155 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK] 2024-11-24T18:53:27,157 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:27,157 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK], DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK]) is bad. 2024-11-24T18:53:27,157 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741852_1035 2024-11-24T18:53:27,158 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK] 2024-11-24T18:53:27,160 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:27,160 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK], DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK]) is bad. 
2024-11-24T18:53:27,160 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741853_1036 2024-11-24T18:53:27,161 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK] 2024-11-24T18:53:27,164 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60574 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741854_1037] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data6]'}, localName='127.0.0.1:37367', datanodeUuid='b8ca2119-ac3b-47a1-a2c3-1d0f556c0334', xmitsInProgress=0}:Exception transferring block BP-829498211-172.17.0.2-1732474383219:blk_1073741854_1037 to mirror 127.0.0.1:32783 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:27,165 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60574 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741854_1037] {}] datanode.BlockReceiver(316): Block 1073741854 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-24T18:53:27,165 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60574 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741854_1037] {}] datanode.DataXceiver(331): 127.0.0.1:37367:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60574 dst: /127.0.0.1:37367 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:27,166 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32783 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:27,167 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK], DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]) is bad. 2024-11-24T18:53:27,167 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741854_1037 2024-11-24T18:53:27,168 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK] 2024-11-24T18:53:27,169 WARN [IPC Server handler 4 on default port 37713 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T18:53:27,169 WARN [IPC Server handler 4 on default port 37713 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T18:53:27,169 WARN [IPC Server handler 4 on default port 37713 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T18:53:27,177 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:27,178 
INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:27,178 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:27,178 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:27,178 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:27,178 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474403135 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474407150 2024-11-24T18:53:27,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37367 is added to blk_1073741840_1023 (size=25992) 2024-11-24T18:53:27,188 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45895:45895)] 2024-11-24T18:53:27,188 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474386585 is not closed yet, will try archiving it next time 2024-11-24T18:53:27,188 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474403135 is not closed yet, will try archiving it next time 2024-11-24T18:53:27,191 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474399121 to hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/oldWALs/f2b92657890a%2C45405%2C1732474385930.1732474399121 2024-11-24T18:53:27,193 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
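In the stretch above, FSHLog notices the write pipeline has shrunk below the required replica count ("Found 1 replicas but expecting no less than 2 replicas"), requests a roll, interrupts its sync runners, and comes back up on a new WAL with a single-datanode pipeline. In this run the roll is driven automatically by the log roller; for reference, a comparable roll can also be requested explicitly through the public Admin API, as in the sketch below, which reuses the region server name from this log and is otherwise an assumption about how one would trigger it by hand rather than anything the test does.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Ask the region server seen in this log to roll its WAL writer.
// Illustrative only: the test itself relies on the automatic log roller.
public final class ManualWalRoll {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.rollWALWriter(ServerName.valueOf("f2b92657890a,45405,1732474385930"));
        }
    }
}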
2024-11-24T18:53:27,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45405 {}] regionserver.HRegion(8855): Flush requested on cd688812c78d2df61acf002dfcc44a35 2024-11-24T18:53:27,220 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing cd688812c78d2df61acf002dfcc44a35 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-24T18:53:27,226 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/.tmp/info/52f25de3a2474ad3af3885786315ffae is 1079, key is tmprow/info:/1732474407219/Put/seqid=0 2024-11-24T18:53:27,233 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32973 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:27,233 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60586 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741856_1039] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data6]'}, localName='127.0.0.1:37367', datanodeUuid='b8ca2119-ac3b-47a1-a2c3-1d0f556c0334', xmitsInProgress=0}:Exception transferring block BP-829498211-172.17.0.2-1732474383219:blk_1073741856_1039 to mirror 127.0.0.1:32973 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
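The HFileWriterImpl(814) lines here and earlier show what the test is actually writing: roughly 1 KB cells keyed like row0007/info:/<ts>/Put and tmprow/info:/<ts>/Put, i.e. single Puts into the info family of TestLogRolling-testLogRollOnDatanodeDeath. A rough client-side reconstruction of one such write is sketched below; the empty qualifier and the value size are read off the key format and cell length in the log, not taken from the test source.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Rough reconstruction of the writes behind the memstore flushes in this
// log: single Puts into the 'info' family of the test table.
public final class TestTableWrite {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(
                 TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))) {
            Put put = new Put(Bytes.toBytes("row0007"));
            // Empty qualifier and ~1 KB value are assumptions; the log only
            // shows keys of the form row0007/info:/<ts>/Put and ~1080-byte cells.
            put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), new byte[1024]);
            table.put(put);
        }
    }
}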
2024-11-24T18:53:27,234 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK], DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK]) is bad. 2024-11-24T18:53:27,234 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741856_1039 2024-11-24T18:53:27,234 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60586 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741856_1039] {}] datanode.BlockReceiver(316): Block 1073741856 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T18:53:27,234 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60586 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741856_1039] {}] datanode.DataXceiver(331): 127.0.0.1:37367:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60586 dst: /127.0.0.1:37367 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:27,235 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK] 2024-11-24T18:53:27,241 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32783 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T18:53:27,241 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60596 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741857_1040] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data6]'}, localName='127.0.0.1:37367', datanodeUuid='b8ca2119-ac3b-47a1-a2c3-1d0f556c0334', xmitsInProgress=0}:Exception transferring block BP-829498211-172.17.0.2-1732474383219:blk_1073741857_1040 to mirror 127.0.0.1:32783 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:27,242 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK], DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]) is bad. 2024-11-24T18:53:27,242 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741857_1040 2024-11-24T18:53:27,242 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60596 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741857_1040] {}] datanode.BlockReceiver(316): Block 1073741857 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T18:53:27,242 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60596 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741857_1040] {}] datanode.DataXceiver(331): 127.0.0.1:37367:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60596 dst: /127.0.0.1:37367 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:27,243 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK] 2024-11-24T18:53:27,245 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:27,245 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK], DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK]) is bad. 2024-11-24T18:53:27,245 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741858_1041 2024-11-24T18:53:27,246 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK] 2024-11-24T18:53:27,249 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43133 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
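Every createBlockOutputStream and DataXceiver trace in this stretch bottoms out the same way: a TCP connect to a datanode port that no longer has a listener (the test has killed those datanodes), which surfaces as java.net.ConnectException: Connection refused. A self-contained sketch that reproduces just that failure mode follows; 32973 is one of the dead datanode ports from this log, and any closed local port behaves identically.

import java.net.InetSocketAddress;
import java.nio.channels.SocketChannel;

// Reproduces the failure at the bottom of the DataStreamer/DataXceiver
// traces: connecting to a port with no listener fails fast with
// java.net.ConnectException: Connection refused.
public final class ConnectRefusedDemo {
    public static void main(String[] args) throws Exception {
        try (SocketChannel ch = SocketChannel.open()) {
            ch.connect(new InetSocketAddress("127.0.0.1", 32973));
        }
    }
}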
2024-11-24T18:53:27,249 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60604 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data6]'}, localName='127.0.0.1:37367', datanodeUuid='b8ca2119-ac3b-47a1-a2c3-1d0f556c0334', xmitsInProgress=0}:Exception transferring block BP-829498211-172.17.0.2-1732474383219:blk_1073741859_1042 to mirror 127.0.0.1:43133 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:27,250 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK], DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK]) is bad. 2024-11-24T18:53:27,250 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741859_1042 2024-11-24T18:53:27,250 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60604 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T18:53:27,250 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60604 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:37367:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60604 dst: /127.0.0.1:37367 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:27,250 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK] 2024-11-24T18:53:27,251 WARN [IPC Server handler 3 on default port 37713 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T18:53:27,252 WARN [IPC Server handler 3 on default port 37713 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T18:53:27,252 WARN [IPC Server handler 3 on default port 37713 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T18:53:27,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37367 is added to blk_1073741860_1043 (size=6027) 2024-11-24T18:53:27,582 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474386585 is not closed yet, will try archiving it next time 2024-11-24T18:53:27,659 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/.tmp/info/52f25de3a2474ad3af3885786315ffae 2024-11-24T18:53:27,667 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/.tmp/info/52f25de3a2474ad3af3885786315ffae as 
hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/52f25de3a2474ad3af3885786315ffae 2024-11-24T18:53:27,675 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/52f25de3a2474ad3af3885786315ffae, entries=1, sequenceid=34, filesize=5.9 K 2024-11-24T18:53:27,677 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for cd688812c78d2df61acf002dfcc44a35 in 457ms, sequenceid=34, compaction requested=true 2024-11-24T18:53:27,677 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for cd688812c78d2df61acf002dfcc44a35: 2024-11-24T18:53:27,677 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-24T18:53:27,677 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T18:53:27,677 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/3c6fdc1afd34466ba41bcc786a2a92cb because midkey is the same as first or last row 2024-11-24T18:53:27,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cd688812c78d2df61acf002dfcc44a35:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T18:53:27,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T18:53:27,678 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T18:53:27,679 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T18:53:27,680 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.HStore(1541): cd688812c78d2df61acf002dfcc44a35/info is initiating minor compaction (all files) 2024-11-24T18:53:27,680 INFO [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of cd688812c78d2df61acf002dfcc44a35/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35. 
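[Editor's note] The flush entries above follow a write-to-temp-then-commit pattern: the memstore is flushed into a file under .tmp, and only once the file is complete is it moved into the live info/ directory. Below is a minimal, hypothetical sketch of that pattern on a local filesystem, purely for illustration; the class, method, and file names are invented and this is not HBase's actual HRegionFileSystem API.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.*;

public class TmpCommitSketch {
    // Write the flushed data under .tmp first, then rename it into the live
    // directory, so readers only ever see complete files.
    static Path flushAndCommit(Path storeDir, String fileName, byte[] payload) throws IOException {
        Path tmpDir = storeDir.resolve(".tmp");
        Path liveDir = storeDir.resolve("info");
        Files.createDirectories(tmpDir);
        Files.createDirectories(liveDir);

        Path tmpFile = tmpDir.resolve(fileName);
        Files.write(tmpFile, payload);                       // "flush" step
        Path liveFile = liveDir.resolve(fileName);
        // "commit" step: an atomic rename on the same filesystem
        return Files.move(tmpFile, liveFile, StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path storeDir = Files.createTempDirectory("store");
        Path committed = flushAndCommit(storeDir, "52f25de3a2474ad3af3885786315ffae",
                "row0002/info".getBytes(StandardCharsets.UTF_8));
        System.out.println("committed " + committed);
    }
}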
2024-11-24T18:53:27,680 INFO [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/77e96bc6576249929f33d4381ed0d3f7, hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/3c6fdc1afd34466ba41bcc786a2a92cb, hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/52f25de3a2474ad3af3885786315ffae] into tmpdir=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/.tmp, totalSize=28.2 K 2024-11-24T18:53:27,681 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] compactions.Compactor(225): Compacting 77e96bc6576249929f33d4381ed0d3f7, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732474401147 2024-11-24T18:53:27,681 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3c6fdc1afd34466ba41bcc786a2a92cb, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1732474405174 2024-11-24T18:53:27,682 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] compactions.Compactor(225): Compacting 52f25de3a2474ad3af3885786315ffae, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732474407219 2024-11-24T18:53:27,700 INFO [RS:0;f2b92657890a:45405-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cd688812c78d2df61acf002dfcc44a35#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T18:53:27,701 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/.tmp/info/50b938b7236e4d70a3e799d85c4f7952 is 1080, key is row0002/info:/1732474401147/Put/seqid=0 2024-11-24T18:53:27,703 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T18:53:27,703 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK], DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK]) is bad. 2024-11-24T18:53:27,703 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741861_1044 2024-11-24T18:53:27,704 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK] 2024-11-24T18:53:27,705 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:27,705 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK], DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK]) is bad. 2024-11-24T18:53:27,705 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741862_1045 2024-11-24T18:53:27,706 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK] 2024-11-24T18:53:27,709 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45993 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
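[Editor's note] The DataStreamer warnings above repeat one recovery cycle: the write to a mirror fails, the block is abandoned, the bad datanode is excluded, and a fresh pipeline is requested from the remaining candidates until too few nodes are left to satisfy the replication factor. The sketch below reproduces that exclude-and-retry loop in a self-contained way; the node addresses are taken from the log, but the "down" set, the method names, and the lack of real networking are all assumptions for illustration only, not the actual DataStreamer code.

import java.util.*;

public class PipelineRetrySketch {
    // Pretend these datanodes refuse connections, as in the ConnectException entries above.
    static final Set<String> DOWN = Set.of(
            "127.0.0.1:43133", "127.0.0.1:32973", "127.0.0.1:32783", "127.0.0.1:45993");

    // Stand-in for opening the block output stream to every node in the pipeline:
    // returns the first unreachable node, or null if the whole pipeline is reachable.
    static String firstBadNode(List<String> pipeline) {
        for (String node : pipeline) {
            if (DOWN.contains(node)) return node;
        }
        return null;
    }

    public static void main(String[] args) {
        List<String> candidates = new ArrayList<>(List.of(
                "127.0.0.1:37367", "127.0.0.1:43133", "127.0.0.1:32973",
                "127.0.0.1:32783", "127.0.0.1:45993"));
        Set<String> excluded = new HashSet<>();
        int replication = 2;

        for (int attempt = 1; attempt <= 5; attempt++) {
            // Build the next pipeline from candidates that have not been excluded yet.
            List<String> pipeline = new ArrayList<>();
            for (String node : candidates) {
                if (!excluded.contains(node)) pipeline.add(node);
                if (pipeline.size() == replication) break;
            }
            if (pipeline.size() < replication) {
                System.out.println("failed to place enough replicas, need " + replication
                        + ", have " + pipeline.size());
                return;
            }
            String bad = firstBadNode(pipeline);
            if (bad == null) {
                System.out.println("pipeline established: " + pipeline);
                return;
            }
            // Abandon the block, exclude the bad datanode, and retry with a new pipeline.
            System.out.println("attempt " + attempt + ": datanode " + bad + " is bad, excluding");
            excluded.add(bad);
        }
    }
}

Run as written, the loop ends up with only 127.0.0.1:37367 available and reports that it cannot place enough replicas, which is the same end state the "Failed to place enough replicas, still in need of 1 to reach 2" messages describe.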
2024-11-24T18:53:27,709 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60632 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741863_1046] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data6]'}, localName='127.0.0.1:37367', datanodeUuid='b8ca2119-ac3b-47a1-a2c3-1d0f556c0334', xmitsInProgress=0}:Exception transferring block BP-829498211-172.17.0.2-1732474383219:blk_1073741863_1046 to mirror 127.0.0.1:45993 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:27,709 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK], DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK]) is bad. 2024-11-24T18:53:27,709 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741863_1046 2024-11-24T18:53:27,709 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60632 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741863_1046] {}] datanode.BlockReceiver(316): Block 1073741863 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T18:53:27,710 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60632 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741863_1046] {}] datanode.DataXceiver(331): 127.0.0.1:37367:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60632 dst: /127.0.0.1:37367 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:27,710 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK] 2024-11-24T18:53:27,714 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:27,714 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK], DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]) is bad. 
2024-11-24T18:53:27,714 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741864_1047 2024-11-24T18:53:27,715 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK] 2024-11-24T18:53:27,716 WARN [IPC Server handler 2 on default port 37713 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T18:53:27,717 WARN [IPC Server handler 2 on default port 37713 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T18:53:27,717 WARN [IPC Server handler 2 on default port 37713 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T18:53:27,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37367 is added to blk_1073741865_1048 (size=17994) 2024-11-24T18:53:27,741 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/.tmp/info/50b938b7236e4d70a3e799d85c4f7952 as hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/50b938b7236e4d70a3e799d85c4f7952 2024-11-24T18:53:27,756 INFO [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in cd688812c78d2df61acf002dfcc44a35/info of cd688812c78d2df61acf002dfcc44a35 into 50b938b7236e4d70a3e799d85c4f7952(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
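[Editor's note] The compaction and split-policy entries above carry some simple arithmetic: the three candidate files (10.1 K + 12.2 K + 5.9 K) sum to the 28.2 K / ~28880-byte selection, the compaction produces a single 17.6 K file, and although 17.6 K exceeds the 16.0 K split threshold, the split is skipped because the midkey equals the first or last row. The sketch below only reproduces that arithmetic and the shape of the check; the key value "row0002" is borrowed from the log as a stand-in, and none of this is the real ExploringCompactionPolicy or split-policy code.

public class CompactionSplitSketch {
    // Sum of the candidate store-file sizes (in KB) from the Compactor entries above.
    static double totalKb(double[] fileSizesKb) {
        double sum = 0;
        for (double s : fileSizesKb) sum += s;
        return sum;
    }

    // Split check in the spirit of the split-policy lines above: the store is big
    // enough once it passes the threshold, but the split is skipped when the midkey
    // equals the first or last row, because one daughter region would be empty.
    static boolean shouldSplit(double storeKb, double thresholdKb,
                               String midKey, String firstKey, String lastKey) {
        if (storeKb <= thresholdKb) return false;
        return !midKey.equals(firstKey) && !midKey.equals(lastKey);
    }

    public static void main(String[] args) {
        double[] candidates = {10.1, 12.2, 5.9};   // 28.2 KB total, roughly the 28880 bytes reported
        System.out.printf("selected %d files, total %.1f KB%n", candidates.length, totalKb(candidates));

        double compactedKb = 17.6;  // size of the single file the compaction produced
        double thresholdKb = 16.0;  // sizeToCheck from the log
        // Using "row0002" for all three keys reproduces the "midkey is the same as
        // first or last row" case, so the split is refused despite the size check passing.
        System.out.println("split? " + shouldSplit(compactedKb, thresholdKb, "row0002", "row0002", "row0002"));
    }
}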
2024-11-24T18:53:27,756 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for cd688812c78d2df61acf002dfcc44a35: 2024-11-24T18:53:27,756 INFO [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35., storeName=cd688812c78d2df61acf002dfcc44a35/info, priority=13, startTime=1732474407677; duration=0sec 2024-11-24T18:53:27,757 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-24T18:53:27,757 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T18:53:27,757 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/50b938b7236e4d70a3e799d85c4f7952 because midkey is the same as first or last row 2024-11-24T18:53:27,757 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-24T18:53:27,757 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T18:53:27,757 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/50b938b7236e4d70a3e799d85c4f7952 because midkey is the same as first or last row 2024-11-24T18:53:27,757 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-24T18:53:27,757 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T18:53:27,757 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/50b938b7236e4d70a3e799d85c4f7952 because midkey is the same as first or last row 2024-11-24T18:53:27,757 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T18:53:27,758 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cd688812c78d2df61acf002dfcc44a35:info 2024-11-24T18:53:27,910 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7ab0f4af[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37367, datanodeUuid=b8ca2119-ac3b-47a1-a2c3-1d0f556c0334, infoPort=45895, infoSecurePort=0, ipcPort=42977, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219):Failed to transfer 
BP-829498211-172.17.0.2-1732474383219:blk_1073741850_1033 to 127.0.0.1:32973 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:27,910 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@74770451[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37367, datanodeUuid=b8ca2119-ac3b-47a1-a2c3-1d0f556c0334, infoPort=45895, infoSecurePort=0, ipcPort=42977, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219):Failed to transfer BP-829498211-172.17.0.2-1732474383219:blk_1073741845_1028 to 127.0.0.1:43133 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:28,045 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T18:53:28,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45405 {}] regionserver.HRegion(8855): Flush requested on cd688812c78d2df61acf002dfcc44a35 2024-11-24T18:53:28,643 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing cd688812c78d2df61acf002dfcc44a35 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-24T18:53:28,649 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/.tmp/info/57f84aaebae448cdbff8a22234970789 is 1079, key is tmprow/info:/1732474408641/Put/seqid=0 2024-11-24T18:53:28,651 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:28,651 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK], DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK]) is bad. 2024-11-24T18:53:28,651 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741866_1049 2024-11-24T18:53:28,652 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK] 2024-11-24T18:53:28,653 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T18:53:28,653 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK], DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK]) is bad. 2024-11-24T18:53:28,653 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741867_1050 2024-11-24T18:53:28,654 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK] 2024-11-24T18:53:28,657 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32783 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:28,657 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60648 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741868_1051] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data6]'}, localName='127.0.0.1:37367', datanodeUuid='b8ca2119-ac3b-47a1-a2c3-1d0f556c0334', xmitsInProgress=0}:Exception transferring block BP-829498211-172.17.0.2-1732474383219:blk_1073741868_1051 to mirror 127.0.0.1:32783 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T18:53:28,657 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK], DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]) is bad. 2024-11-24T18:53:28,657 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741868_1051 2024-11-24T18:53:28,657 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60648 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741868_1051] {}] datanode.BlockReceiver(316): Block 1073741868 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T18:53:28,657 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60648 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741868_1051] {}] datanode.DataXceiver(331): 127.0.0.1:37367:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60648 dst: /127.0.0.1:37367 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:28,658 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK] 2024-11-24T18:53:28,659 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T18:53:28,659 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK], DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK]) is bad. 2024-11-24T18:53:28,659 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741869_1052 2024-11-24T18:53:28,660 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK] 2024-11-24T18:53:28,661 WARN [IPC Server handler 4 on default port 37713 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T18:53:28,661 WARN [IPC Server handler 4 on default port 37713 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T18:53:28,661 WARN [IPC Server handler 4 on default port 37713 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T18:53:28,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37367 is added to blk_1073741870_1053 (size=6027) 2024-11-24T18:53:28,909 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@74770451[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37367, datanodeUuid=b8ca2119-ac3b-47a1-a2c3-1d0f556c0334, infoPort=45895, infoSecurePort=0, ipcPort=42977, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219):Failed to transfer BP-829498211-172.17.0.2-1732474383219:blk_1073741840_1023 to 127.0.0.1:32783 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:28,909 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7ab0f4af[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37367, datanodeUuid=b8ca2119-ac3b-47a1-a2c3-1d0f556c0334, infoPort=45895, infoSecurePort=0, ipcPort=42977, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219):Failed to transfer BP-829498211-172.17.0.2-1732474383219:blk_1073741860_1043 to 127.0.0.1:43133 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:29,065 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/.tmp/info/57f84aaebae448cdbff8a22234970789 2024-11-24T18:53:29,073 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/.tmp/info/57f84aaebae448cdbff8a22234970789 as hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/57f84aaebae448cdbff8a22234970789 2024-11-24T18:53:29,079 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/57f84aaebae448cdbff8a22234970789, entries=1, sequenceid=45, filesize=5.9 K 2024-11-24T18:53:29,081 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for cd688812c78d2df61acf002dfcc44a35 in 439ms, sequenceid=45, compaction requested=false 2024-11-24T18:53:29,081 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for cd688812c78d2df61acf002dfcc44a35: 2024-11-24T18:53:29,081 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-24T18:53:29,081 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T18:53:29,081 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/50b938b7236e4d70a3e799d85c4f7952 because midkey is the same as first or last row 2024-11-24T18:53:29,191 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:29,191 WARN [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK]] 2024-11-24T18:53:29,191 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f2b92657890a%2C45405%2C1732474385930:(num 1732474407150) roll requested 2024-11-24T18:53:29,191 INFO [regionserver/f2b92657890a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C45405%2C1732474385930.1732474409191 2024-11-24T18:53:29,193 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:29,194 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32783 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
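[Editor's note] The "Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL." warning above is a low-replication check: when the surviving write pipeline has fewer datanodes than the configured minimum, the current WAL is closed and a roll is requested so a new file can be opened on a healthier pipeline. The sketch below shows only the shape of that check; the WalRoller interface and method names are invented for illustration and are not HBase's FSHLog API.

import java.util.List;

public class WalRollSketch {
    // Hypothetical hook that would close the current WAL and open a new one.
    interface WalRoller { void requestRoll(String reason); }

    // If the current pipeline has fewer replicas than required, ask for a roll.
    static void checkLowReplication(List<String> currentPipeline, int minReplicas, WalRoller roller) {
        if (currentPipeline.size() < minReplicas) {
            roller.requestRoll("Found " + currentPipeline.size()
                    + " replicas but expecting no less than " + minReplicas + " replicas");
        }
    }

    public static void main(String[] args) {
        // Only one datanode is left in the pipeline, as in the log entries above.
        List<String> pipeline = List.of("127.0.0.1:37367");
        checkLowReplication(pipeline, 2, reason -> System.out.println("roll requested: " + reason));
    }
}

After the roll, the old WAL file is archived to oldWALs once it can be closed, which matches the "Rolled WAL ... with entries=15" and "Archiving ... to ... oldWALs" entries that follow.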
2024-11-24T18:53:29,194 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60660 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741871_1054] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data6]'}, localName='127.0.0.1:37367', datanodeUuid='b8ca2119-ac3b-47a1-a2c3-1d0f556c0334', xmitsInProgress=0}:Exception transferring block BP-829498211-172.17.0.2-1732474383219:blk_1073741871_1054 to mirror 127.0.0.1:32783 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:29,195 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK], DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]) is bad. 2024-11-24T18:53:29,195 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741871_1054 2024-11-24T18:53:29,195 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60660 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741871_1054] {}] datanode.BlockReceiver(316): Block 1073741871 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-24T18:53:29,195 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60660 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741871_1054] {}] datanode.DataXceiver(331): 127.0.0.1:37367:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60660 dst: /127.0.0.1:37367 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:29,195 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK] 2024-11-24T18:53:29,196 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:29,196 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK], DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK]) is bad. 2024-11-24T18:53:29,196 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741872_1055 2024-11-24T18:53:29,197 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK] 2024-11-24T18:53:29,198 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:29,198 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK], DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK]) is bad. 2024-11-24T18:53:29,198 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741873_1056 2024-11-24T18:53:29,198 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK] 2024-11-24T18:53:29,200 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43133 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:29,200 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60668 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741874_1057] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data6]'}, localName='127.0.0.1:37367', datanodeUuid='b8ca2119-ac3b-47a1-a2c3-1d0f556c0334', xmitsInProgress=0}:Exception transferring block BP-829498211-172.17.0.2-1732474383219:blk_1073741874_1057 to mirror 127.0.0.1:43133 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:29,200 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK], DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK]) is bad. 2024-11-24T18:53:29,200 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741874_1057 2024-11-24T18:53:29,200 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60668 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741874_1057] {}] datanode.BlockReceiver(316): Block 1073741874 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-24T18:53:29,200 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60668 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741874_1057] {}] datanode.DataXceiver(331): 127.0.0.1:37367:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60668 dst: /127.0.0.1:37367 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T18:53:29,201 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK] 2024-11-24T18:53:29,201 WARN [IPC Server handler 4 on default port 37713 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T18:53:29,202 WARN [IPC Server handler 4 on default port 37713 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T18:53:29,202 WARN [IPC Server handler 4 on default port 37713 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T18:53:29,204 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:29,204 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:29,205 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:29,205 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:29,205 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:29,205 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474407150 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474409191 2024-11-24T18:53:29,206 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45895:45895)] 2024-11-24T18:53:29,206 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474386585 is not closed yet, will try archiving it next time 2024-11-24T18:53:29,206 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474407150 is not closed yet, will try archiving it next time 2024-11-24T18:53:29,206 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474403135 to 
hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/oldWALs/f2b92657890a%2C45405%2C1732474385930.1732474403135 2024-11-24T18:53:29,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37367 is added to blk_1073741855_1038 (size=13591) 2024-11-24T18:53:29,608 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474386585 is not closed yet, will try archiving it next time 2024-11-24T18:53:30,045 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:30,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45405 {}] regionserver.HRegion(8855): Flush requested on cd688812c78d2df61acf002dfcc44a35 2024-11-24T18:53:30,064 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing cd688812c78d2df61acf002dfcc44a35 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-24T18:53:30,069 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/.tmp/info/90242436841449db87bec14641a1c86b is 1079, key is tmprow/info:/1732474410062/Put/seqid=0 2024-11-24T18:53:30,071 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T18:53:30,071 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK], DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK]) is bad. 2024-11-24T18:53:30,071 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741876_1059 2024-11-24T18:53:30,072 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK] 2024-11-24T18:53:30,074 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:30,074 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK], DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]) is bad. 2024-11-24T18:53:30,074 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741877_1060 2024-11-24T18:53:30,075 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK] 2024-11-24T18:53:30,078 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32973 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T18:53:30,078 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60672 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741878_1061] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data6]'}, localName='127.0.0.1:37367', datanodeUuid='b8ca2119-ac3b-47a1-a2c3-1d0f556c0334', xmitsInProgress=0}:Exception transferring block BP-829498211-172.17.0.2-1732474383219:blk_1073741878_1061 to mirror 127.0.0.1:32973 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:30,078 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK], DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK]) is bad. 2024-11-24T18:53:30,078 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741878_1061 2024-11-24T18:53:30,078 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60672 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741878_1061] {}] datanode.BlockReceiver(316): Block 1073741878 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T18:53:30,078 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:60672 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741878_1061] {}] datanode.DataXceiver(331): 127.0.0.1:37367:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60672 dst: /127.0.0.1:37367 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:30,079 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK] 2024-11-24T18:53:30,081 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:30,081 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK], DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK]) is bad. 
2024-11-24T18:53:30,081 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741879_1062 2024-11-24T18:53:30,082 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK] 2024-11-24T18:53:30,082 WARN [IPC Server handler 1 on default port 37713 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T18:53:30,082 WARN [IPC Server handler 1 on default port 37713 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T18:53:30,083 WARN [IPC Server handler 1 on default port 37713 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T18:53:30,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37367 is added to blk_1073741880_1063 (size=6027) 2024-11-24T18:53:30,088 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/.tmp/info/90242436841449db87bec14641a1c86b 2024-11-24T18:53:30,095 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/.tmp/info/90242436841449db87bec14641a1c86b as hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/90242436841449db87bec14641a1c86b 2024-11-24T18:53:30,102 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/90242436841449db87bec14641a1c86b, entries=1, sequenceid=55, filesize=5.9 K 2024-11-24T18:53:30,103 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=0 B/0 for cd688812c78d2df61acf002dfcc44a35 in 40ms, sequenceid=55, compaction requested=true 2024-11-24T18:53:30,103 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
cd688812c78d2df61acf002dfcc44a35: 2024-11-24T18:53:30,103 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-24T18:53:30,103 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T18:53:30,103 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/50b938b7236e4d70a3e799d85c4f7952 because midkey is the same as first or last row 2024-11-24T18:53:30,103 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cd688812c78d2df61acf002dfcc44a35:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T18:53:30,104 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T18:53:30,104 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T18:53:30,105 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T18:53:30,105 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.HStore(1541): cd688812c78d2df61acf002dfcc44a35/info is initiating minor compaction (all files) 2024-11-24T18:53:30,105 INFO [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of cd688812c78d2df61acf002dfcc44a35/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35. 
2024-11-24T18:53:30,105 INFO [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/50b938b7236e4d70a3e799d85c4f7952, hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/57f84aaebae448cdbff8a22234970789, hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/90242436841449db87bec14641a1c86b] into tmpdir=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/.tmp, totalSize=29.3 K 2024-11-24T18:53:30,106 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] compactions.Compactor(225): Compacting 50b938b7236e4d70a3e799d85c4f7952, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732474401147 2024-11-24T18:53:30,106 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] compactions.Compactor(225): Compacting 57f84aaebae448cdbff8a22234970789, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732474408641 2024-11-24T18:53:30,107 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] compactions.Compactor(225): Compacting 90242436841449db87bec14641a1c86b, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732474410062 2024-11-24T18:53:30,124 INFO [RS:0;f2b92657890a:45405-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cd688812c78d2df61acf002dfcc44a35#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T18:53:30,125 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/.tmp/info/fb8f8ed48b7a47d085bca2f8d0eabf84 is 1080, key is row0002/info:/1732474401147/Put/seqid=0 2024-11-24T18:53:30,126 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T18:53:30,127 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK], DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK]) is bad. 2024-11-24T18:53:30,127 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741881_1064 2024-11-24T18:53:30,127 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32973,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK] 2024-11-24T18:53:30,128 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:30,129 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK], DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]) is bad. 2024-11-24T18:53:30,129 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741882_1065 2024-11-24T18:53:30,129 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK] 2024-11-24T18:53:30,130 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:30,131 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK], DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK]) is bad. 2024-11-24T18:53:30,131 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741883_1066 2024-11-24T18:53:30,131 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK] 2024-11-24T18:53:30,132 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:30,132 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK], DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK]) is bad. 
2024-11-24T18:53:30,132 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741884_1067 2024-11-24T18:53:30,133 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK] 2024-11-24T18:53:30,134 WARN [IPC Server handler 2 on default port 37713 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T18:53:30,134 WARN [IPC Server handler 2 on default port 37713 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T18:53:30,134 WARN [IPC Server handler 2 on default port 37713 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T18:53:30,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37367 is added to blk_1073741885_1068 (size=18097) 2024-11-24T18:53:30,148 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/.tmp/info/fb8f8ed48b7a47d085bca2f8d0eabf84 as hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/fb8f8ed48b7a47d085bca2f8d0eabf84 2024-11-24T18:53:30,181 INFO [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in cd688812c78d2df61acf002dfcc44a35/info of cd688812c78d2df61acf002dfcc44a35 into fb8f8ed48b7a47d085bca2f8d0eabf84(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-24T18:53:30,181 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for cd688812c78d2df61acf002dfcc44a35: 2024-11-24T18:53:30,181 INFO [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35., storeName=cd688812c78d2df61acf002dfcc44a35/info, priority=13, startTime=1732474410103; duration=0sec 2024-11-24T18:53:30,181 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-24T18:53:30,181 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T18:53:30,181 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/fb8f8ed48b7a47d085bca2f8d0eabf84 because midkey is the same as first or last row 2024-11-24T18:53:30,181 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-24T18:53:30,181 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T18:53:30,181 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/fb8f8ed48b7a47d085bca2f8d0eabf84 because midkey is the same as first or last row 2024-11-24T18:53:30,181 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-24T18:53:30,181 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T18:53:30,181 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/fb8f8ed48b7a47d085bca2f8d0eabf84 because midkey is the same as first or last row 2024-11-24T18:53:30,181 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T18:53:30,181 DEBUG [RS:0;f2b92657890a:45405-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cd688812c78d2df61acf002dfcc44a35:info 2024-11-24T18:53:30,911 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@74770451[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37367, datanodeUuid=b8ca2119-ac3b-47a1-a2c3-1d0f556c0334, infoPort=45895, infoSecurePort=0, ipcPort=42977, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219):Failed to transfer 
BP-829498211-172.17.0.2-1732474383219:blk_1073741870_1053 to 127.0.0.1:43133 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:30,911 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7ab0f4af[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37367, datanodeUuid=b8ca2119-ac3b-47a1-a2c3-1d0f556c0334, infoPort=45895, infoSecurePort=0, ipcPort=42977, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219):Failed to transfer BP-829498211-172.17.0.2-1732474383219:blk_1073741865_1048 to 127.0.0.1:32783 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:31,194 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:31,206 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:31,206 WARN [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-24T18:53:31,297 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:53:31,300 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T18:53:31,301 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T18:53:31,301 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T18:53:31,301 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T18:53:31,302 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@441dcfc4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/hadoop.log.dir/,AVAILABLE} 2024-11-24T18:53:31,302 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@381443d3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T18:53:31,393 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@14b00457{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/java.io.tmpdir/jetty-localhost-38019-hadoop-hdfs-3_4_1-tests_jar-_-any-12648716228206544419/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:53:31,394 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22c6c03b{HTTP/1.1, (http/1.1)}{localhost:38019} 2024-11-24T18:53:31,394 INFO [Time-limited test {}] server.Server(415): Started @140746ms 2024-11-24T18:53:31,395 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
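[Editorial note on the 18:53:31,206 warning above] The "Too many consecutive RollWriter requests" message is FSHLog giving up on rolling the WAL because the pipeline keeps coming back with fewer live datanodes than it tolerates. To the best of my reading, the two knobs it consults are hbase.regionserver.hlog.tolerable.lowreplication (minimum WAL pipeline replication before a roll is requested) and hbase.regionserver.hlog.lowreplication.rolllimit (how many consecutive low-replication rolls are attempted before this warning). The sketch below is a minimal, hypothetical example of setting them for a small test cluster; the class name and the chosen values are illustrative assumptions, not taken from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Hypothetical helper: not part of the test above, only a sketch of the knobs involved.
    public class WalLowReplicationTuning {
      public static Configuration tunedConf() {
        Configuration conf = HBaseConfiguration.create();
        // Ask FSHLog to request a roll once the WAL pipeline drops below 2 live replicas.
        conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
        // Stop re-rolling after 3 consecutive low-replication rolls instead of the default;
        // exhausting this limit is what produces the "Too many consecutive RollWriter
        // requests" warning seen in the log.
        conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 3);
        return conf;
      }
    }

In this run the roller keeps landing on pipelines that immediately lose datanodes, so tuning these values only changes how quickly the roller gives up; it does not restore the missing datanodes.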
2024-11-24T18:53:31,911 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7ab0f4af[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37367, datanodeUuid=b8ca2119-ac3b-47a1-a2c3-1d0f556c0334, infoPort=45895, infoSecurePort=0, ipcPort=42977, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219):Failed to transfer BP-829498211-172.17.0.2-1732474383219:blk_1073741855_1038 to 127.0.0.1:43133 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:31,911 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@74770451[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37367, datanodeUuid=b8ca2119-ac3b-47a1-a2c3-1d0f556c0334, infoPort=45895, infoSecurePort=0, ipcPort=42977, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219):Failed to transfer BP-829498211-172.17.0.2-1732474383219:blk_1073741880_1063 to 127.0.0.1:45993 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:32,045 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T18:53:32,351 WARN [Thread-991 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T18:53:32,358 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe05ab7210ed63d51 with lease ID 0xddbbb5e26caa0125: from storage DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e node DatanodeRegistration(127.0.0.1:38455, datanodeUuid=adeb25d7-9417-4898-a7a7-6d9e033f0ea2, infoPort=36089, infoSecurePort=0, ipcPort=42651, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T18:53:32,359 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe05ab7210ed63d51 with lease ID 0xddbbb5e26caa0125: from storage DS-dae4e14c-1528-4a86-9b43-100d74912dd0 node DatanodeRegistration(127.0.0.1:38455, datanodeUuid=adeb25d7-9417-4898-a7a7-6d9e033f0ea2, infoPort=36089, infoSecurePort=0, ipcPort=42651, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:53:33,195 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:33,207 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:33,909 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@74770451[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37367, datanodeUuid=b8ca2119-ac3b-47a1-a2c3-1d0f556c0334, infoPort=45895, infoSecurePort=0, ipcPort=42977, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219):Failed to transfer BP-829498211-172.17.0.2-1732474383219:blk_1073741885_1068 to 127.0.0.1:43133 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:34,046 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:35,195 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:35,207 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:35,737 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
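[Editorial note on the repeated "All datanodes [...] are bad. Aborting..." entries above] These traces come from DataStreamer pipeline recovery on an already-open block: once every datanode in the pipeline has been marked bad and no replacement can be found, the write aborts. On the HDFS client side this behaviour is governed by the replace-datanode-on-failure settings; the keys below are standard HDFS client keys, but the values and the small-cluster context are assumptions for illustration, not read from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    // Hypothetical sketch for a 2-3 datanode test cluster, where a strict
    // replace-datanode-on-failure policy cannot find a substitute node.
    public class PipelineRecoveryTuning {
      public static Configuration clientConf() {
        Configuration conf = new HdfsConfiguration();
        // Keep datanode replacement enabled on pipeline failure...
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // ...but use the DEFAULT heuristic rather than ALWAYS, so tiny pipelines are
        // not forced to find a replacement that does not exist.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // Best-effort: continue with the surviving datanodes instead of failing the
        // write when no replacement can be found.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        return conf;
      }
    }

Even with best-effort set, the write still aborts once the last surviving datanode in the pipeline is marked bad, which is exactly what the master's WAL stream hits here.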
2024-11-24T18:53:36,046 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:36,286 ERROR [FSHLog-0-hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData-prefix:f2b92657890a,41719,1732474385757 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:36,286 WARN [FSHLog-0-hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData-prefix:f2b92657890a,41719,1732474385757 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:36,286 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog f2b92657890a%2C41719%2C1732474385757:(num 1732474386067) roll requested 2024-11-24T18:53:36,287 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C41719%2C1732474385757.1732474416287 2024-11-24T18:53:36,291 WARN [Thread-1011 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43133 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:36,291 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_333290757_22 at /127.0.0.1:45546 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741886_1069] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data4]'}, localName='127.0.0.1:38455', datanodeUuid='adeb25d7-9417-4898-a7a7-6d9e033f0ea2', xmitsInProgress=0}:Exception transferring block BP-829498211-172.17.0.2-1732474383219:blk_1073741886_1069 to mirror 127.0.0.1:43133 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:36,292 WARN [Thread-1011 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38455,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK], DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK]) is bad. 2024-11-24T18:53:36,292 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_333290757_22 at /127.0.0.1:45546 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741886_1069] {}] datanode.BlockReceiver(316): Block 1073741886 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-11-24T18:53:36,292 WARN [Thread-1011 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741886_1069 2024-11-24T18:53:36,292 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_333290757_22 at /127.0.0.1:45546 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741886_1069] {}] datanode.DataXceiver(331): 127.0.0.1:38455:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45546 dst: /127.0.0.1:38455 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:36,293 WARN [Thread-1011 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43133,DS-0716b5d6-ffd7-4bc5-9c59-8ee7cde1e954,DISK] 2024-11-24T18:53:36,295 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_333290757_22 at /127.0.0.1:35350 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741887_1070] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data6]'}, localName='127.0.0.1:37367', datanodeUuid='b8ca2119-ac3b-47a1-a2c3-1d0f556c0334', xmitsInProgress=0}:Exception transferring block BP-829498211-172.17.0.2-1732474383219:blk_1073741887_1070 to mirror 127.0.0.1:45993 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T18:53:36,295 WARN [Thread-1011 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45993 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:36,296 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_333290757_22 at /127.0.0.1:35350 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741887_1070] {}] datanode.BlockReceiver(316): Block 1073741887 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-24T18:53:36,296 WARN [Thread-1011 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK], DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK]) is bad. 2024-11-24T18:53:36,296 WARN [Thread-1011 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741887_1070 2024-11-24T18:53:36,296 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_333290757_22 at /127.0.0.1:35350 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741887_1070] {}] datanode.DataXceiver(331): 127.0.0.1:37367:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35350 dst: /127.0.0.1:37367 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T18:53:36,297 WARN [Thread-1011 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK] 2024-11-24T18:53:36,303 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:36,303 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:36,304 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:36,304 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:36,304 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:36,304 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/WALs/f2b92657890a,41719,1732474385757/f2b92657890a%2C41719%2C1732474385757.1732474386067 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/WALs/f2b92657890a,41719,1732474385757/f2b92657890a%2C41719%2C1732474385757.1732474416287 2024-11-24T18:53:36,305 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:36,305 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T18:53:36,305 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36089:36089),(127.0.0.1/127.0.0.1:45895:45895)] 2024-11-24T18:53:36,305 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/WALs/f2b92657890a,41719,1732474385757/f2b92657890a%2C41719%2C1732474385757.1732474386067 2024-11-24T18:53:36,305 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/WALs/f2b92657890a,41719,1732474385757/f2b92657890a%2C41719%2C1732474385757.1732474386067 is not closed yet, will try archiving it next time 2024-11-24T18:53:36,306 WARN [IPC Server handler 0 on default port 37713 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/WALs/f2b92657890a,41719,1732474385757/f2b92657890a%2C41719%2C1732474385757.1732474386067 has not been closed. Lease recovery is in progress. RecoveryId = 1072 for block blk_1073741830_1006 2024-11-24T18:53:36,306 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/WALs/f2b92657890a,41719,1732474385757/f2b92657890a%2C41719%2C1732474385757.1732474386067 after 1ms 2024-11-24T18:53:37,195 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:37,207 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:39,196 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:39,208 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:40,307 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/WALs/f2b92657890a,41719,1732474385757/f2b92657890a%2C41719%2C1732474385757.1732474386067 after 4002ms 2024-11-24T18:53:40,356 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@65eb832[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38455, datanodeUuid=adeb25d7-9417-4898-a7a7-6d9e033f0ea2, infoPort=36089, infoSecurePort=0, ipcPort=42651, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219):Failed to transfer BP-829498211-172.17.0.2-1732474383219:blk_1073741831_1007 to 127.0.0.1:45993 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:40,356 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6af850bf[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38455, datanodeUuid=adeb25d7-9417-4898-a7a7-6d9e033f0ea2, infoPort=36089, infoSecurePort=0, ipcPort=42651, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219):Failed to transfer BP-829498211-172.17.0.2-1732474383219:blk_1073741835_1011 to 127.0.0.1:45993 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:41,196 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:41,208 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:41,355 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6af850bf[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38455, datanodeUuid=adeb25d7-9417-4898-a7a7-6d9e033f0ea2, infoPort=36089, infoSecurePort=0, ipcPort=42651, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219):Failed to transfer BP-829498211-172.17.0.2-1732474383219:blk_1073741827_1003 to 127.0.0.1:45993 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:41,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37367 is added to blk_1073741829_1005 (size=34) 2024-11-24T18:53:42,371 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6671477d {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-829498211-172.17.0.2-1732474383219:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:32783,null,null]) java.net.ConnectException: Call From f2b92657890a/172.17.0.2 to localhost:36637 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-24T18:53:42,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38455 is added to blk_1073741833_1020 (size=455) 2024-11-24T18:53:43,167 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474386585 to hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/oldWALs/f2b92657890a%2C45405%2C1732474385930.1732474386585 2024-11-24T18:53:43,168 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474407150 to hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/oldWALs/f2b92657890a%2C45405%2C1732474385930.1732474407150 2024-11-24T18:53:43,196 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:43,208 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T18:53:43,356 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@65eb832[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38455, datanodeUuid=adeb25d7-9417-4898-a7a7-6d9e033f0ea2, infoPort=36089, infoSecurePort=0, ipcPort=42651, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219):Failed to transfer BP-829498211-172.17.0.2-1732474383219:blk_1073741826_1002 to 127.0.0.1:43133 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:43,356 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6af850bf[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38455, datanodeUuid=adeb25d7-9417-4898-a7a7-6d9e033f0ea2, infoPort=36089, infoSecurePort=0, ipcPort=42651, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219):Failed to transfer BP-829498211-172.17.0.2-1732474383219:blk_1073741833_1020 to 127.0.0.1:45993 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:45,039 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C45405%2C1732474385930.1732474425039 2024-11-24T18:53:45,042 WARN [Thread-1026 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1073 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:45,042 WARN [Thread-1026 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741889_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK], DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK]) is bad. 2024-11-24T18:53:45,042 WARN [Thread-1026 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741889_1073 2024-11-24T18:53:45,042 WARN [Thread-1026 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK] 2024-11-24T18:53:45,046 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:45,046 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:45,046 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:45,046 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:45,047 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:45,047 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474409191 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474425039 2024-11-24T18:53:45,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37367 is added to blk_1073741875_1058 (size=12911) 2024-11-24T18:53:45,049 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36089:36089),(127.0.0.1/127.0.0.1:45895:45895)] 2024-11-24T18:53:45,049 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474409191 is not closed yet, will try archiving it next time 2024-11-24T18:53:45,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45405 {}] regionserver.HRegion(8855): Flush requested on cd688812c78d2df61acf002dfcc44a35 2024-11-24T18:53:45,054 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing cd688812c78d2df61acf002dfcc44a35 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-24T18:53:45,068 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/.tmp/info/bd63adad94a545df904c7aeaf430e5c7 is 1080, key is row0013/info:/1732474425050/Put/seqid=0 2024-11-24T18:53:45,071 WARN [Thread-1032 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1075 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 
127.0.0.1:45993 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:45,071 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:43906 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741891_1075] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data4]'}, localName='127.0.0.1:38455', datanodeUuid='adeb25d7-9417-4898-a7a7-6d9e033f0ea2', xmitsInProgress=0}:Exception transferring block BP-829498211-172.17.0.2-1732474383219:blk_1073741891_1075 to mirror 127.0.0.1:45993 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:45,071 WARN [Thread-1032 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741891_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38455,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK], DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK]) is bad. 2024-11-24T18:53:45,071 WARN [Thread-1032 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741891_1075 2024-11-24T18:53:45,071 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:43906 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741891_1075] {}] datanode.BlockReceiver(316): Block 1073741891 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
2024-11-24T18:53:45,071 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:43906 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741891_1075] {}] datanode.DataXceiver(331): 127.0.0.1:38455:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43906 dst: /127.0.0.1:38455 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:45,072 WARN [Thread-1032 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK] 2024-11-24T18:53:45,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37367 is added to blk_1073741892_1076 (size=8190) 2024-11-24T18:53:45,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38455 is added to blk_1073741892_1076 (size=8190) 2024-11-24T18:53:45,080 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/.tmp/info/bd63adad94a545df904c7aeaf430e5c7 2024-11-24T18:53:45,088 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/.tmp/info/bd63adad94a545df904c7aeaf430e5c7 as hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/bd63adad94a545df904c7aeaf430e5c7 2024-11-24T18:53:45,096 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/bd63adad94a545df904c7aeaf430e5c7, entries=3, sequenceid=66, filesize=8.0 K 2024-11-24T18:53:45,097 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for cd688812c78d2df61acf002dfcc44a35 in 43ms, sequenceid=66, compaction requested=false 2024-11-24T18:53:45,097 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for cd688812c78d2df61acf002dfcc44a35: 
2024-11-24T18:53:45,098 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-24T18:53:45,098 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T18:53:45,098 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/fb8f8ed48b7a47d085bca2f8d0eabf84 because midkey is the same as first or last row 2024-11-24T18:53:45,197 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:45,209 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:45,209 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-24T18:53:45,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T18:53:45,277 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-24T18:53:45,277 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T18:53:45,277 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:53:45,277 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:53:45,277 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-24T18:53:45,277 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T18:53:45,277 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=627172387, stopped=false 2024-11-24T18:53:45,278 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=f2b92657890a,41719,1732474385757 2024-11-24T18:53:45,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43977-0x1016e3130f80002, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T18:53:45,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T18:53:45,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x1016e3130f80001, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T18:53:45,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43977-0x1016e3130f80002, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:45,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:45,378 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T18:53:45,378 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-24T18:53:45,378 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T18:53:45,378 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:53:45,378 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'f2b92657890a,45405,1732474385930' ***** 2024-11-24T18:53:45,378 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T18:53:45,378 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'f2b92657890a,43977,1732474387094' ***** 2024-11-24T18:53:45,378 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T18:53:45,378 INFO [RS:0;f2b92657890a:45405 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T18:53:45,379 INFO [RS:1;f2b92657890a:43977 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T18:53:45,379 INFO [RS:0;f2b92657890a:45405 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T18:53:45,379 INFO [RS:1;f2b92657890a:43977 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T18:53:45,379 INFO [RS:0;f2b92657890a:45405 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T18:53:45,379 INFO [RS:1;f2b92657890a:43977 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T18:53:45,379 INFO [RS:0;f2b92657890a:45405 {}] regionserver.HRegionServer(3091): Received CLOSE for cd688812c78d2df61acf002dfcc44a35 2024-11-24T18:53:45,379 INFO [RS:1;f2b92657890a:43977 {}] regionserver.HRegionServer(959): stopping server f2b92657890a,43977,1732474387094 2024-11-24T18:53:45,379 INFO [RS:1;f2b92657890a:43977 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T18:53:45,379 INFO [RS:1;f2b92657890a:43977 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;f2b92657890a:43977. 
2024-11-24T18:53:45,379 DEBUG [RS:1;f2b92657890a:43977 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T18:53:45,379 INFO [RS:0;f2b92657890a:45405 {}] regionserver.HRegionServer(959): stopping server f2b92657890a,45405,1732474385930 2024-11-24T18:53:45,379 DEBUG [RS:1;f2b92657890a:43977 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:53:45,379 INFO [RS:0;f2b92657890a:45405 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T18:53:45,379 INFO [RS:1;f2b92657890a:43977 {}] regionserver.HRegionServer(976): stopping server f2b92657890a,43977,1732474387094; all regions closed. 2024-11-24T18:53:45,379 INFO [RS:0;f2b92657890a:45405 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;f2b92657890a:45405. 
2024-11-24T18:53:45,379 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing cd688812c78d2df61acf002dfcc44a35, disabling compactions & flushes 2024-11-24T18:53:45,379 DEBUG [RS:0;f2b92657890a:45405 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T18:53:45,379 DEBUG [RS:0;f2b92657890a:45405 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:53:45,379 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35. 2024-11-24T18:53:45,379 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T18:53:45,379 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35. 2024-11-24T18:53:45,379 INFO [RS:0;f2b92657890a:45405 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T18:53:45,379 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35. after waiting 0 ms 2024-11-24T18:53:45,380 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35. 2024-11-24T18:53:45,380 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing cd688812c78d2df61acf002dfcc44a35 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-24T18:53:45,381 INFO [RS:0;f2b92657890a:45405 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T18:53:45,381 INFO [RS:0;f2b92657890a:45405 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-24T18:53:45,381 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:45,381 INFO [RS:0;f2b92657890a:45405 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T18:53:45,381 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:45,381 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:45,381 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:45,381 INFO [RS:0;f2b92657890a:45405 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-24T18:53:45,381 DEBUG [RS:0;f2b92657890a:45405 {}] regionserver.HRegionServer(1325): Online Regions={cd688812c78d2df61acf002dfcc44a35=TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35., 1588230740=hbase:meta,,1.1588230740} 2024-11-24T18:53:45,381 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:45,381 DEBUG [RS:0;f2b92657890a:45405 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, cd688812c78d2df61acf002dfcc44a35 2024-11-24T18:53:45,381 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T18:53:45,381 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T18:53:45,382 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T18:53:45,382 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T18:53:45,382 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T18:53:45,382 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:45,382 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-24T18:53:45,382 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:45,382 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 2024-11-24T18:53:45,382 ERROR [FSHLog-0-hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98-prefix:f2b92657890a,45405,1732474385930.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:45,382 WARN [FSHLog-0-hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98-prefix:f2b92657890a,45405,1732474385930.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:45,382 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f2b92657890a%2C45405%2C1732474385930.meta:.meta(num 1732474386931) roll requested 2024-11-24T18:53:45,383 WARN [IPC Server handler 3 on default port 37713 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 has not been closed. Lease recovery is in progress. 
RecoveryId = 1077 for block blk_1073741837_1013 2024-11-24T18:53:45,383 INFO [regionserver/f2b92657890a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C45405%2C1732474385930.meta.1732474425383.meta 2024-11-24T18:53:45,383 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 after 1ms 2024-11-24T18:53:45,384 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T18:53:45,384 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43977-0x1016e3130f80002, quorum=127.0.0.1:49552, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T18:53:45,384 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T18:53:45,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x1016e3130f80001, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:45,385 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45405-0x1016e3130f80001, quorum=127.0.0.1:49552, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T18:53:45,386 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/.tmp/info/cc7c59541bfa4787b6576a8c8ee54a63 is 1080, key is row0015/info:/1732474425055/Put/seqid=0 2024-11-24T18:53:45,392 WARN [Thread-1042 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1079 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:45,392 WARN [Thread-1042 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741894_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK], DatanodeInfoWithStorage[127.0.0.1:38455,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK]) is bad. 
2024-11-24T18:53:45,392 WARN [Thread-1042 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741894_1079 2024-11-24T18:53:45,393 WARN [Thread-1042 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK] 2024-11-24T18:53:45,409 WARN [Thread-1041 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1078 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45993 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:45,409 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:34264 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741893_1078] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data6]'}, localName='127.0.0.1:37367', datanodeUuid='b8ca2119-ac3b-47a1-a2c3-1d0f556c0334', xmitsInProgress=0}:Exception transferring block BP-829498211-172.17.0.2-1732474383219:blk_1073741893_1078 to mirror 127.0.0.1:45993 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:45,409 WARN [Thread-1041 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741893_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK], DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK]) is bad. 
2024-11-24T18:53:45,409 WARN [Thread-1041 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741893_1078 2024-11-24T18:53:45,409 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:34264 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741893_1078] {}] datanode.BlockReceiver(316): Block 1073741893 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T18:53:45,410 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:34264 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741893_1078] {}] datanode.DataXceiver(331): 127.0.0.1:37367:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34264 dst: /127.0.0.1:37367 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:45,410 WARN [Thread-1041 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK] 2024-11-24T18:53:45,411 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:45,411 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:45,411 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:45,412 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:45,412 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:45,412 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474425383.meta 2024-11-24T18:53:45,416 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:45,416 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32783,DS-8bdd6e5e-876e-4f73-9203-c2f186854fcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:45,416 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta 2024-11-24T18:53:45,417 WARN [IPC Server handler 3 on default port 37713 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta has not been closed. Lease recovery is in progress. RecoveryId = 1082 for block blk_1073741834_1010 2024-11-24T18:53:45,417 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta after 1ms 2024-11-24T18:53:45,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38455 is added to blk_1073741896_1081 (size=14660) 2024-11-24T18:53:45,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37367 is added to blk_1073741896_1081 (size=14660) 2024-11-24T18:53:45,422 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/.tmp/info/cc7c59541bfa4787b6576a8c8ee54a63 2024-11-24T18:53:45,424 INFO [regionserver/f2b92657890a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-24T18:53:45,424 INFO [regionserver/f2b92657890a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-24T18:53:45,431 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/.tmp/info/cc7c59541bfa4787b6576a8c8ee54a63 as 
hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/cc7c59541bfa4787b6576a8c8ee54a63 2024-11-24T18:53:45,433 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45895:45895),(127.0.0.1/127.0.0.1:36089:36089)] 2024-11-24T18:53:45,433 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta is not closed yet, will try archiving it next time 2024-11-24T18:53:45,439 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/cc7c59541bfa4787b6576a8c8ee54a63, entries=9, sequenceid=78, filesize=14.3 K 2024-11-24T18:53:45,440 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for cd688812c78d2df61acf002dfcc44a35 in 60ms, sequenceid=78, compaction requested=true 2024-11-24T18:53:45,444 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/77e96bc6576249929f33d4381ed0d3f7, hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/3c6fdc1afd34466ba41bcc786a2a92cb, hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/50b938b7236e4d70a3e799d85c4f7952, hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/52f25de3a2474ad3af3885786315ffae, hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/57f84aaebae448cdbff8a22234970789, hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/90242436841449db87bec14641a1c86b] to archive 2024-11-24T18:53:45,446 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-24T18:53:45,449 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/77e96bc6576249929f33d4381ed0d3f7 to hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/77e96bc6576249929f33d4381ed0d3f7 2024-11-24T18:53:45,450 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.1732474409191 to hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/oldWALs/f2b92657890a%2C45405%2C1732474385930.1732474409191 2024-11-24T18:53:45,452 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/hbase/meta/1588230740/.tmp/info/73606a819327461ca5843cb43653e984 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35./info:regioninfo/1732474387604/Put/seqid=0 2024-11-24T18:53:45,454 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/3c6fdc1afd34466ba41bcc786a2a92cb to hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/3c6fdc1afd34466ba41bcc786a2a92cb 2024-11-24T18:53:45,456 WARN [Thread-1054 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T18:53:45,456 WARN [Thread-1054 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741897_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK], DatanodeInfoWithStorage[127.0.0.1:38455,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK]) is bad. 2024-11-24T18:53:45,456 WARN [Thread-1054 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741897_1083 2024-11-24T18:53:45,457 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/50b938b7236e4d70a3e799d85c4f7952 to hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/50b938b7236e4d70a3e799d85c4f7952 2024-11-24T18:53:45,457 WARN [Thread-1054 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK] 2024-11-24T18:53:45,458 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/52f25de3a2474ad3af3885786315ffae to hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/52f25de3a2474ad3af3885786315ffae 2024-11-24T18:53:45,460 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/57f84aaebae448cdbff8a22234970789 to hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/57f84aaebae448cdbff8a22234970789 2024-11-24T18:53:45,462 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/90242436841449db87bec14641a1c86b to hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/info/90242436841449db87bec14641a1c86b 2024-11-24T18:53:45,463 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=f2b92657890a:41719 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-24T18:53:45,463 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [77e96bc6576249929f33d4381ed0d3f7=10347, 3c6fdc1afd34466ba41bcc786a2a92cb=12506, 50b938b7236e4d70a3e799d85c4f7952=17994, 52f25de3a2474ad3af3885786315ffae=6027, 57f84aaebae448cdbff8a22234970789=6027, 90242436841449db87bec14641a1c86b=6027] 2024-11-24T18:53:45,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37367 is added to blk_1073741898_1084 (size=7089) 2024-11-24T18:53:45,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38455 is added to blk_1073741898_1084 (size=7089) 2024-11-24T18:53:45,469 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/hbase/meta/1588230740/.tmp/info/73606a819327461ca5843cb43653e984 2024-11-24T18:53:45,472 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cd688812c78d2df61acf002dfcc44a35/recovered.edits/81.seqid, newMaxSeqId=81, maxSeqId=1 2024-11-24T18:53:45,473 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35. 2024-11-24T18:53:45,473 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for cd688812c78d2df61acf002dfcc44a35: Waiting for close lock at 1732474425379Running coprocessor pre-close hooks at 1732474425379Disabling compacts and flushes for region at 1732474425379Disabling writes for close at 1732474425379Obtaining lock to block concurrent updates at 1732474425380 (+1 ms)Preparing flush snapshotting stores in cd688812c78d2df61acf002dfcc44a35 at 1732474425380Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35., syncing WAL and waiting on mvcc, flushsize=dataSize=9683, getHeapSize=10608, getOffHeapSize=0, getCellsCount=9 at 1732474425380Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35. 
at 1732474425381 (+1 ms)Flushing cd688812c78d2df61acf002dfcc44a35/info: creating writer at 1732474425381Flushing cd688812c78d2df61acf002dfcc44a35/info: appending metadata at 1732474425385 (+4 ms)Flushing cd688812c78d2df61acf002dfcc44a35/info: closing flushed file at 1732474425385Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3ee546ac: reopening flushed file at 1732474425430 (+45 ms)Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for cd688812c78d2df61acf002dfcc44a35 in 60ms, sequenceid=78, compaction requested=true at 1732474425441 (+11 ms)Writing region close event to WAL at 1732474425467 (+26 ms)Running coprocessor post-close hooks at 1732474425473 (+6 ms)Closed at 1732474425473 2024-11-24T18:53:45,473 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732474387223.cd688812c78d2df61acf002dfcc44a35. 2024-11-24T18:53:45,496 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/hbase/meta/1588230740/.tmp/ns/1f7a8b83777d41a8a4f5d28a3a0e3f63 is 43, key is default/ns:d/1732474387012/Put/seqid=0 2024-11-24T18:53:45,499 WARN [Thread-1061 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1085 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45993 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:45,499 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:43964 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741899_1085] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data4]'}, localName='127.0.0.1:38455', datanodeUuid='adeb25d7-9417-4898-a7a7-6d9e033f0ea2', xmitsInProgress=0}:Exception transferring block BP-829498211-172.17.0.2-1732474383219:blk_1073741899_1085 to mirror 127.0.0.1:45993 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:45,500 WARN [Thread-1061 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741899_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38455,DS-7c4fcb9f-751f-4a33-b466-2d1e1999346e,DISK], DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK]) is bad. 2024-11-24T18:53:45,500 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:43964 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741899_1085] {}] datanode.BlockReceiver(316): Block 1073741899 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T18:53:45,500 WARN [Thread-1061 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741899_1085 2024-11-24T18:53:45,500 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1902154818_22 at /127.0.0.1:43964 [Receiving block BP-829498211-172.17.0.2-1732474383219:blk_1073741899_1085] {}] datanode.DataXceiver(331): 127.0.0.1:38455:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43964 dst: /127.0.0.1:38455 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T18:53:45,501 WARN [Thread-1061 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK] 2024-11-24T18:53:45,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37367 is added to blk_1073741900_1086 (size=5153) 2024-11-24T18:53:45,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38455 is added to blk_1073741900_1086 (size=5153) 2024-11-24T18:53:45,520 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/hbase/meta/1588230740/.tmp/ns/1f7a8b83777d41a8a4f5d28a3a0e3f63 2024-11-24T18:53:45,546 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/hbase/meta/1588230740/.tmp/table/768aa1cea505459aa058e5c706444c84 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1732474387614/Put/seqid=0 2024-11-24T18:53:45,548 WARN [Thread-1068 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:53:45,548 WARN [Thread-1068 {}] hdfs.DataStreamer(1731): Error Recovery for BP-829498211-172.17.0.2-1732474383219:blk_1073741901_1087 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK], DatanodeInfoWithStorage[127.0.0.1:37367,DS-ca721525-2760-4643-ab64-d1434814772c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK]) is bad. 
2024-11-24T18:53:45,548 WARN [Thread-1068 {}] hdfs.DataStreamer(1850): Abandoning BP-829498211-172.17.0.2-1732474383219:blk_1073741901_1087 2024-11-24T18:53:45,549 WARN [Thread-1068 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45993,DS-feb9aff5-069b-483f-95c1-d8393c723b1f,DISK] 2024-11-24T18:53:45,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37367 is added to blk_1073741902_1088 (size=5424) 2024-11-24T18:53:45,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38455 is added to blk_1073741902_1088 (size=5424) 2024-11-24T18:53:45,564 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/hbase/meta/1588230740/.tmp/table/768aa1cea505459aa058e5c706444c84 2024-11-24T18:53:45,572 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/hbase/meta/1588230740/.tmp/info/73606a819327461ca5843cb43653e984 as hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/hbase/meta/1588230740/info/73606a819327461ca5843cb43653e984 2024-11-24T18:53:45,582 DEBUG [RS:0;f2b92657890a:45405 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-24T18:53:45,582 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/hbase/meta/1588230740/info/73606a819327461ca5843cb43653e984, entries=10, sequenceid=11, filesize=6.9 K 2024-11-24T18:53:45,583 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/hbase/meta/1588230740/.tmp/ns/1f7a8b83777d41a8a4f5d28a3a0e3f63 as hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/hbase/meta/1588230740/ns/1f7a8b83777d41a8a4f5d28a3a0e3f63 2024-11-24T18:53:45,590 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/hbase/meta/1588230740/ns/1f7a8b83777d41a8a4f5d28a3a0e3f63, entries=2, sequenceid=11, filesize=5.0 K 2024-11-24T18:53:45,591 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/hbase/meta/1588230740/.tmp/table/768aa1cea505459aa058e5c706444c84 as hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/hbase/meta/1588230740/table/768aa1cea505459aa058e5c706444c84 2024-11-24T18:53:45,599 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/hbase/meta/1588230740/table/768aa1cea505459aa058e5c706444c84, entries=2, sequenceid=11, filesize=5.3 K 
2024-11-24T18:53:45,600 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 218ms, sequenceid=11, compaction requested=false 2024-11-24T18:53:45,612 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-24T18:53:45,612 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T18:53:45,612 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T18:53:45,613 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732474425381Running coprocessor pre-close hooks at 1732474425381Disabling compacts and flushes for region at 1732474425381Disabling writes for close at 1732474425382 (+1 ms)Obtaining lock to block concurrent updates at 1732474425382Preparing flush snapshotting stores in 1588230740 at 1732474425382Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1732474425382Flushing stores of hbase:meta,,1.1588230740 at 1732474425433 (+51 ms)Flushing 1588230740/info: creating writer at 1732474425433Flushing 1588230740/info: appending metadata at 1732474425452 (+19 ms)Flushing 1588230740/info: closing flushed file at 1732474425452Flushing 1588230740/ns: creating writer at 1732474425475 (+23 ms)Flushing 1588230740/ns: appending metadata at 1732474425496 (+21 ms)Flushing 1588230740/ns: closing flushed file at 1732474425496Flushing 1588230740/table: creating writer at 1732474425527 (+31 ms)Flushing 1588230740/table: appending metadata at 1732474425545 (+18 ms)Flushing 1588230740/table: closing flushed file at 1732474425545Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@75a636c0: reopening flushed file at 1732474425571 (+26 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3a1e3433: reopening flushed file at 1732474425582 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@78c09640: reopening flushed file at 1732474425590 (+8 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 218ms, sequenceid=11, compaction requested=false at 1732474425600 (+10 ms)Writing region close event to WAL at 1732474425608 (+8 ms)Running coprocessor post-close hooks at 1732474425612 (+4 ms)Closed at 1732474425612 2024-11-24T18:53:45,613 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T18:53:45,782 INFO [RS:0;f2b92657890a:45405 {}] regionserver.HRegionServer(976): stopping server f2b92657890a,45405,1732474385930; all regions closed. 
2024-11-24T18:53:45,782 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:45,782 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:45,782 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:45,783 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:45,783 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:45,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38455 is added to blk_1073741895_1080 (size=825) 2024-11-24T18:53:45,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37367 is added to blk_1073741895_1080 (size=825) 2024-11-24T18:53:45,911 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@74770451[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37367, datanodeUuid=b8ca2119-ac3b-47a1-a2c3-1d0f556c0334, infoPort=45895, infoSecurePort=0, ipcPort=42977, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219):Failed to transfer BP-829498211-172.17.0.2-1732474383219:blk_1073741875_1058 to 127.0.0.1:45993 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:46,201 INFO [regionserver/f2b92657890a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-24T18:53:46,201 INFO [regionserver/f2b92657890a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-24T18:53:46,427 INFO [regionserver/f2b92657890a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T18:53:47,085 INFO [master/f2b92657890a:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-24T18:53:47,085 INFO [master/f2b92657890a:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-24T18:53:47,194 INFO [regionserver/f2b92657890a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T18:53:47,356 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6af850bf[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38455, datanodeUuid=adeb25d7-9417-4898-a7a7-6d9e033f0ea2, infoPort=36089, infoSecurePort=0, ipcPort=42651, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219):Failed to transfer BP-829498211-172.17.0.2-1732474383219:blk_1073741825_1001 to 127.0.0.1:45993 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:47,356 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@65eb832[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38455, datanodeUuid=adeb25d7-9417-4898-a7a7-6d9e033f0ea2, infoPort=36089, infoSecurePort=0, ipcPort=42651, storageInfo=lv=-57;cid=testClusterID;nsid=1784485339;c=1732474383219):Failed to transfer BP-829498211-172.17.0.2-1732474383219:blk_1073741836_1012 to 127.0.0.1:45993 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T18:53:48,327 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-24T18:53:48,328 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T18:53:48,328 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T18:53:48,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37367 is added to blk_1073741828_1004 (size=1189) 2024-11-24T18:53:48,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37367 is added to blk_1073741832_1008 (size=32) 2024-11-24T18:53:49,384 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 after 4002ms 2024-11-24T18:53:49,418 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta after 4002ms 2024-11-24T18:53:50,382 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-24T18:53:50,384 DEBUG [RS:1;f2b92657890a:43977 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/oldWALs 2024-11-24T18:53:50,384 INFO [RS:1;f2b92657890a:43977 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f2b92657890a%2C43977%2C1732474387094:(num 1732474387326) 2024-11-24T18:53:50,384 DEBUG [RS:1;f2b92657890a:43977 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:53:50,384 INFO [RS:1;f2b92657890a:43977 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T18:53:50,384 INFO [RS:1;f2b92657890a:43977 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T18:53:50,385 INFO [RS:1;f2b92657890a:43977 {}] hbase.ChoreService(370): Chore service for: regionserver/f2b92657890a:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T18:53:50,385 INFO [RS:1;f2b92657890a:43977 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T18:53:50,385 INFO [RS:1;f2b92657890a:43977 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T18:53:50,385 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T18:53:50,385 INFO [RS:1;f2b92657890a:43977 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
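The AbstractFSWAL shutdown error above names the config key "hbase.wal.fshlog.wait.on.shutdown.seconds" as the knob for how long the WAL close is allowed to wait (the 5 seconds it reports here). A minimal, hypothetical Java sketch of raising that wait before starting a cluster or test follows; only the config key and the 5-second default come from this log, while the class name, the 30-second value, and the use of HBaseConfiguration are illustrative assumptions, not something taken from this run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalShutdownWaitSketch {
  public static void main(String[] args) {
    // Start from the usual HBase configuration (hbase-site.xml on the classpath).
    Configuration conf = HBaseConfiguration.create();
    // The run above gave up after 5 seconds; allow a slow or degraded HDFS more
    // time before the WAL writer close is abandoned. 30 is an arbitrary example.
    conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);
    System.out.println("wal shutdown wait = "
        + conf.get("hbase.wal.fshlog.wait.on.shutdown.seconds"));
  }
}

The same key can equally be set in hbase-site.xml; either way it only changes how long shutdown blocks on the writer close, not whether the underlying filesystem recovers.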
2024-11-24T18:53:50,385 INFO [RS:1;f2b92657890a:43977 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T18:53:50,385 INFO [RS:1;f2b92657890a:43977 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43977 2024-11-24T18:53:50,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at 
jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:53:50,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T18:53:50,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43977-0x1016e3130f80002, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/f2b92657890a,43977,1732474387094 2024-11-24T18:53:50,430 INFO [RS:1;f2b92657890a:43977 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T18:53:50,440 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [f2b92657890a,43977,1732474387094] 2024-11-24T18:53:50,451 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/f2b92657890a,43977,1732474387094 already deleted, retry=false 2024-11-24T18:53:50,451 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; f2b92657890a,43977,1732474387094 expired; onlineServers=1 2024-11-24T18:53:50,474 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:53:50,496 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:53:50,496 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:53:50,497 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:53:50,497 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:53:50,497 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:53:50,519 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:53:50,519 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:53:50,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43977-0x1016e3130f80002, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T18:53:50,540 INFO [RS:1;f2b92657890a:43977 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T18:53:50,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43977-0x1016e3130f80002, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T18:53:50,541 INFO [RS:1;f2b92657890a:43977 {}] regionserver.HRegionServer(1031): Exiting; stopping=f2b92657890a,43977,1732474387094; zookeeper connection closed. 
2024-11-24T18:53:50,541 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5559f0d8 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5559f0d8 2024-11-24T18:53:50,783 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-24T18:53:50,786 DEBUG [RS:0;f2b92657890a:45405 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/oldWALs 2024-11-24T18:53:50,786 INFO [RS:0;f2b92657890a:45405 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f2b92657890a%2C45405%2C1732474385930.meta:.meta(num 1732474425383) 2024-11-24T18:53:50,787 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:50,787 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:50,787 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:50,787 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:50,787 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:50,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37367 is added to blk_1073741890_1074 (size=14682) 2024-11-24T18:53:50,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38455 is added to blk_1073741890_1074 (size=14682) 2024-11-24T18:53:50,791 DEBUG [RS:0;f2b92657890a:45405 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/oldWALs 2024-11-24T18:53:50,791 INFO [RS:0;f2b92657890a:45405 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f2b92657890a%2C45405%2C1732474385930:(num 1732474425039) 2024-11-24T18:53:50,791 DEBUG [RS:0;f2b92657890a:45405 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:53:50,791 INFO [RS:0;f2b92657890a:45405 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T18:53:50,792 INFO [RS:0;f2b92657890a:45405 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T18:53:50,792 INFO [RS:0;f2b92657890a:45405 {}] hbase.ChoreService(370): Chore service for: regionserver/f2b92657890a:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-24T18:53:50,792 INFO [RS:0;f2b92657890a:45405 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T18:53:50,792 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T18:53:50,792 INFO [RS:0;f2b92657890a:45405 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45405 2024-11-24T18:53:50,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x1016e3130f80001, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/f2b92657890a,45405,1732474385930 2024-11-24T18:53:50,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T18:53:50,840 INFO [RS:0;f2b92657890a:45405 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T18:53:50,841 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [f2b92657890a,45405,1732474385930] 2024-11-24T18:53:50,861 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/f2b92657890a,45405,1732474385930 already deleted, retry=false 2024-11-24T18:53:50,861 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; f2b92657890a,45405,1732474385930 expired; onlineServers=0 2024-11-24T18:53:50,861 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'f2b92657890a,41719,1732474385757' ***** 2024-11-24T18:53:50,861 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T18:53:50,861 INFO [M:0;f2b92657890a:41719 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T18:53:50,861 INFO [M:0;f2b92657890a:41719 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T18:53:50,861 DEBUG [M:0;f2b92657890a:41719 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T18:53:50,862 DEBUG [M:0;f2b92657890a:41719 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T18:53:50,862 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T18:53:50,862 DEBUG [master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.small.0-1732474386286 {}] cleaner.HFileCleaner(306): Exit Thread[master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.small.0-1732474386286,5,FailOnTimeoutGroup] 2024-11-24T18:53:50,862 DEBUG [master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.large.0-1732474386286 {}] cleaner.HFileCleaner(306): Exit Thread[master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.large.0-1732474386286,5,FailOnTimeoutGroup] 2024-11-24T18:53:50,862 INFO [M:0;f2b92657890a:41719 {}] hbase.ChoreService(370): Chore service for: master/f2b92657890a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T18:53:50,862 INFO [M:0;f2b92657890a:41719 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T18:53:50,862 DEBUG [M:0;f2b92657890a:41719 {}] master.HMaster(1795): Stopping service threads 2024-11-24T18:53:50,862 INFO [M:0;f2b92657890a:41719 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T18:53:50,862 INFO [M:0;f2b92657890a:41719 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T18:53:50,862 INFO [M:0;f2b92657890a:41719 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T18:53:50,862 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T18:53:50,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T18:53:50,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:50,872 DEBUG [M:0;f2b92657890a:41719 {}] zookeeper.ZKUtil(347): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T18:53:50,872 WARN [M:0;f2b92657890a:41719 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T18:53:50,872 INFO [M:0;f2b92657890a:41719 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/.lastflushedseqids 2024-11-24T18:53:50,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38455 is added to blk_1073741903_1089 (size=130) 2024-11-24T18:53:50,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37367 is added to blk_1073741903_1089 (size=130) 2024-11-24T18:53:50,951 INFO [RS:0;f2b92657890a:45405 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T18:53:50,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x1016e3130f80001, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T18:53:50,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x1016e3130f80001, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T18:53:50,951 INFO 
[RS:0;f2b92657890a:45405 {}] regionserver.HRegionServer(1031): Exiting; stopping=f2b92657890a,45405,1732474385930; zookeeper connection closed. 2024-11-24T18:53:50,951 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@46d606a1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@46d606a1 2024-11-24T18:53:50,951 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-24T18:53:51,021 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T18:53:51,036 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:53:51,036 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:53:51,037 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:53:51,037 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:53:51,037 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:53:51,043 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:53:51,043 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:53:51,045 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:53:51,279 INFO [M:0;f2b92657890a:41719 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T18:53:51,279 INFO [M:0;f2b92657890a:41719 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T18:53:51,279 DEBUG [M:0;f2b92657890a:41719 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T18:53:51,279 INFO [M:0;f2b92657890a:41719 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:53:51,279 DEBUG [M:0;f2b92657890a:41719 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:53:51,279 DEBUG [M:0;f2b92657890a:41719 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T18:53:51,279 DEBUG [M:0;f2b92657890a:41719 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T18:53:51,279 INFO [M:0;f2b92657890a:41719 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-24T18:53:51,298 DEBUG [M:0;f2b92657890a:41719 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4a8e2be0bfb049fb8c9744c6de9b5971 is 82, key is hbase:meta,,1/info:regioninfo/1732474386963/Put/seqid=0 2024-11-24T18:53:51,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37367 is added to blk_1073741904_1090 (size=5672) 2024-11-24T18:53:51,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38455 is added to blk_1073741904_1090 (size=5672) 2024-11-24T18:53:51,308 INFO [M:0;f2b92657890a:41719 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4a8e2be0bfb049fb8c9744c6de9b5971 2024-11-24T18:53:51,330 DEBUG [M:0;f2b92657890a:41719 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9064a008170d4f8188e6ce13c9c258fa is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732474387619/Put/seqid=0 2024-11-24T18:53:51,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37367 is added to blk_1073741905_1091 (size=6255) 2024-11-24T18:53:51,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38455 is added to blk_1073741905_1091 (size=6255) 2024-11-24T18:53:51,336 INFO [M:0;f2b92657890a:41719 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9064a008170d4f8188e6ce13c9c258fa 2024-11-24T18:53:51,341 INFO [M:0;f2b92657890a:41719 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9064a008170d4f8188e6ce13c9c258fa 2024-11-24T18:53:51,356 DEBUG [M:0;f2b92657890a:41719 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6d5c790c28784c108010cd2f0245a967 is 69, key is f2b92657890a,43977,1732474387094/rs:state/1732474387168/Put/seqid=0 2024-11-24T18:53:51,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37367 is added to blk_1073741906_1092 (size=5224) 2024-11-24T18:53:51,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38455 is added to blk_1073741906_1092 (size=5224) 2024-11-24T18:53:51,361 INFO [M:0;f2b92657890a:41719 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), 
to=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6d5c790c28784c108010cd2f0245a967 2024-11-24T18:53:51,386 DEBUG [M:0;f2b92657890a:41719 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c6ae08815b614654b9b0dac87bfd18b1 is 52, key is load_balancer_on/state:d/1732474387078/Put/seqid=0 2024-11-24T18:53:51,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:53:51,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37367 is added to blk_1073741907_1093 (size=5056) 2024-11-24T18:53:51,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38455 is added to blk_1073741907_1093 (size=5056) 2024-11-24T18:53:51,398 INFO [M:0;f2b92657890a:41719 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c6ae08815b614654b9b0dac87bfd18b1 2024-11-24T18:53:51,403 DEBUG [M:0;f2b92657890a:41719 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4a8e2be0bfb049fb8c9744c6de9b5971 as hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4a8e2be0bfb049fb8c9744c6de9b5971 2024-11-24T18:53:51,408 INFO [M:0;f2b92657890a:41719 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4a8e2be0bfb049fb8c9744c6de9b5971, entries=8, sequenceid=60, filesize=5.5 K 2024-11-24T18:53:51,410 DEBUG [M:0;f2b92657890a:41719 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9064a008170d4f8188e6ce13c9c258fa as hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9064a008170d4f8188e6ce13c9c258fa 2024-11-24T18:53:51,416 INFO [M:0;f2b92657890a:41719 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9064a008170d4f8188e6ce13c9c258fa 2024-11-24T18:53:51,416 INFO [M:0;f2b92657890a:41719 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9064a008170d4f8188e6ce13c9c258fa, entries=6, sequenceid=60, filesize=6.1 K 2024-11-24T18:53:51,417 DEBUG [M:0;f2b92657890a:41719 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6d5c790c28784c108010cd2f0245a967 as hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6d5c790c28784c108010cd2f0245a967 2024-11-24T18:53:51,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:53:51,423 INFO [M:0;f2b92657890a:41719 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6d5c790c28784c108010cd2f0245a967, entries=2, sequenceid=60, filesize=5.1 K 2024-11-24T18:53:51,425 DEBUG [M:0;f2b92657890a:41719 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c6ae08815b614654b9b0dac87bfd18b1 as hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c6ae08815b614654b9b0dac87bfd18b1 2024-11-24T18:53:51,431 INFO [M:0;f2b92657890a:41719 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c6ae08815b614654b9b0dac87bfd18b1, entries=1, sequenceid=60, filesize=4.9 K 2024-11-24T18:53:51,434 INFO [M:0;f2b92657890a:41719 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 155ms, sequenceid=60, compaction requested=false 2024-11-24T18:53:51,441 INFO [M:0;f2b92657890a:41719 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T18:53:51,441 DEBUG [M:0;f2b92657890a:41719 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732474431279Disabling compacts and flushes for region at 1732474431279Disabling writes for close at 1732474431279Obtaining lock to block concurrent updates at 1732474431279Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732474431279Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1732474431280 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732474431281 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732474431281Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732474431297 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732474431297Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732474431315 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732474431329 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732474431330 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732474431341 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732474431356 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732474431356Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732474431366 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732474431385 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732474431385Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@de2d4a8: reopening flushed file at 1732474431402 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d8f6fe1: reopening flushed file at 1732474431409 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24541134: reopening flushed file at 1732474431416 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@36205742: reopening flushed file at 1732474431424 (+8 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 155ms, sequenceid=60, compaction requested=false at 1732474431434 (+10 ms)Writing region close event to WAL at 1732474431441 (+7 ms)Closed at 1732474431441 2024-11-24T18:53:51,441 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:51,441 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:51,441 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:51,442 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:51,442 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:53:51,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37367 is added to blk_1073741888_1071 (size=1045) 2024-11-24T18:53:51,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38455 is added to blk_1073741888_1071 (size=1045) 2024-11-24T18:53:52,375 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@123e6e2a {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block 
(block=BP-829498211-172.17.0.2-1732474383219:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:32783,null,null]) java.net.ConnectException: Call From f2b92657890a/172.17.0.2 to localhost:36637 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-24T18:53:52,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:53:52,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:53:53,316 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/WALs/f2b92657890a,41719,1732474385757/f2b92657890a%2C41719%2C1732474385757.1732474386067 to hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/oldWALs/f2b92657890a%2C41719%2C1732474385757.1732474386067 2024-11-24T18:53:53,320 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/MasterData/oldWALs/f2b92657890a%2C41719%2C1732474385757.1732474386067 to hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/oldWALs/f2b92657890a%2C41719%2C1732474385757.1732474386067$masterlocalwal$ 2024-11-24T18:53:53,320 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T18:53:53,320 INFO [M:0;f2b92657890a:41719 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-24T18:53:53,320 INFO [M:0;f2b92657890a:41719 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41719 2024-11-24T18:53:53,320 INFO [M:0;f2b92657890a:41719 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T18:53:53,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:53:53,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:53:53,466 INFO [M:0;f2b92657890a:41719 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T18:53:53,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T18:53:53,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41719-0x1016e3130f80000, quorum=127.0.0.1:49552, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T18:53:53,469 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@14b00457{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:53:53,469 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22c6c03b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T18:53:53,469 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T18:53:53,469 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@381443d3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T18:53:53,469 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@441dcfc4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/hadoop.log.dir/,STOPPED} 2024-11-24T18:53:53,470 WARN [BP-829498211-172.17.0.2-1732474383219 heartbeating to localhost/127.0.0.1:37713 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T18:53:53,470 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T18:53:53,470 WARN [BP-829498211-172.17.0.2-1732474383219 heartbeating to localhost/127.0.0.1:37713 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-829498211-172.17.0.2-1732474383219 (Datanode Uuid adeb25d7-9417-4898-a7a7-6d9e033f0ea2) service to localhost/127.0.0.1:37713 2024-11-24T18:53:53,470 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T18:53:53,470 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6770d9ed {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-829498211-172.17.0.2-1732474383219:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:32783,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:36637 , LocalHost:localPort f2b92657890a/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-24T18:53:53,471 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6770d9ed {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-829498211-172.17.0.2-1732474383219:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:38455,null,null]) java.io.IOException: No block pool offer service for bpid=BP-829498211-172.17.0.2-1732474383219 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T18:53:53,471 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6770d9ed {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-829498211-172.17.0.2-1732474383219:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:32783,null,null], DatanodeInfoWithStorage[127.0.0.1:38455,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-829498211-172.17.0.2-1732474383219:blk_1073741837_1013, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:32783,null,null], DatanodeInfoWithStorage[127.0.0.1:38455,null,null]] 2024-11-24T18:53:53,471 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data3/current/BP-829498211-172.17.0.2-1732474383219 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:53:53,471 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6770d9ed {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-829498211-172.17.0.2-1732474383219:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:32783,null,null]) java.io.IOException: No block pool offer service for bpid=BP-829498211-172.17.0.2-1732474383219 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:53:53,471 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data4/current/BP-829498211-172.17.0.2-1732474383219 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:53:53,471 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6770d9ed {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-829498211-172.17.0.2-1732474383219:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:38455,null,null]) java.io.IOException: No block pool offer service for bpid=BP-829498211-172.17.0.2-1732474383219 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T18:53:53,471 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6770d9ed {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-829498211-172.17.0.2-1732474383219:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:32783,null,null], DatanodeInfoWithStorage[127.0.0.1:38455,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-829498211-172.17.0.2-1732474383219:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:32783,null,null], DatanodeInfoWithStorage[127.0.0.1:38455,null,null]] 2024-11-24T18:53:53,471 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T18:53:53,476 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3fd17220{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:53:53,476 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4c68f920{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T18:53:53,476 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T18:53:53,476 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ab5b96c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T18:53:53,476 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30008f24{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/hadoop.log.dir/,STOPPED} 2024-11-24T18:53:53,481 WARN [BP-829498211-172.17.0.2-1732474383219 heartbeating to localhost/127.0.0.1:37713 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T18:53:53,481 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T18:53:53,481 WARN [BP-829498211-172.17.0.2-1732474383219 heartbeating to localhost/127.0.0.1:37713 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-829498211-172.17.0.2-1732474383219 (Datanode Uuid b8ca2119-ac3b-47a1-a2c3-1d0f556c0334) service to localhost/127.0.0.1:37713 2024-11-24T18:53:53,481 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T18:53:53,481 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data5/current/BP-829498211-172.17.0.2-1732474383219 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:53:53,482 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/cluster_18358266-8dd0-4c2e-1aeb-38759830aa93/data/data6/current/BP-829498211-172.17.0.2-1732474383219 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:53:53,482 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T18:53:53,489 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7982676d{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T18:53:53,490 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2efbdc75{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T18:53:53,490 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T18:53:53,490 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1d790455{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T18:53:53,490 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3150e6db{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/hadoop.log.dir/,STOPPED} 2024-11-24T18:53:53,497 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T18:53:53,541 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T18:53:53,550 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=157 (was 82) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40133 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37713 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37713 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37713 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:37713 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37713 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37713 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37713 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37713 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37713 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007fcb3cbf4000.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:37713 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:40133 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37713 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007fcb3cbf4000.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=448 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=272 (was 155) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=8607 (was 9913) 2024-11-24T18:53:53,559 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=157, OpenFileDescriptor=448, MaxFileDescriptor=1048576, SystemLoadAverage=272, ProcessCount=11, AvailableMemoryMB=8607 2024-11-24T18:53:53,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T18:53:53,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/hadoop.log.dir so I do NOT create it in target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08 2024-11-24T18:53:53,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e6689f1-50c4-dda5-193a-9d3c069fe268/hadoop.tmp.dir so I do NOT create it in target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08 2024-11-24T18:53:53,559 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/cluster_c7dc029b-5c53-012f-f1b6-357157c49622, deleteOnExit=true 2024-11-24T18:53:53,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T18:53:53,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/test.cache.data in system properties and HBase conf 2024-11-24T18:53:53,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T18:53:53,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/hadoop.log.dir in system properties and HBase conf 2024-11-24T18:53:53,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T18:53:53,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T18:53:53,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T18:53:53,560 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-24T18:53:53,561 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T18:53:53,561 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T18:53:53,561 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T18:53:53,561 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T18:53:53,561 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T18:53:53,561 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T18:53:53,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T18:53:53,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T18:53:53,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T18:53:53,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/nfs.dump.dir in system properties and HBase conf 2024-11-24T18:53:53,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/java.io.tmpdir in system properties and HBase conf 2024-11-24T18:53:53,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T18:53:53,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T18:53:53,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T18:53:53,581 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T18:53:53,952 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:53:53,958 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T18:53:53,961 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T18:53:53,961 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T18:53:53,961 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T18:53:53,962 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:53:53,962 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2e4cbcc2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/hadoop.log.dir/,AVAILABLE} 2024-11-24T18:53:53,963 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25a29a07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T18:53:54,059 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@249ce69c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/java.io.tmpdir/jetty-localhost-36375-hadoop-hdfs-3_4_1-tests_jar-_-any-3972589632403492727/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T18:53:54,060 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7c2762d6{HTTP/1.1, (http/1.1)}{localhost:36375} 2024-11-24T18:53:54,060 INFO [Time-limited test {}] server.Server(415): Started @163412ms 2024-11-24T18:53:54,071 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T18:53:54,312 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:53:54,316 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T18:53:54,317 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T18:53:54,317 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T18:53:54,317 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T18:53:54,318 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3bfb2c17{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/hadoop.log.dir/,AVAILABLE} 2024-11-24T18:53:54,319 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1fc8bed8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T18:53:54,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-11-24T18:53:54,414 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6b8d1f8b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/java.io.tmpdir/jetty-localhost-42907-hadoop-hdfs-3_4_1-tests_jar-_-any-7675237284618725177/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:53:54,414 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@62bbed65{HTTP/1.1, (http/1.1)}{localhost:42907} 2024-11-24T18:53:54,415 INFO [Time-limited test {}] server.Server(415): Started @163767ms 2024-11-24T18:53:54,416 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T18:53:54,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:53:54,447 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:53:54,451 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T18:53:54,452 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T18:53:54,452 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T18:53:54,452 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T18:53:54,453 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59ddfe6e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/hadoop.log.dir/,AVAILABLE} 2024-11-24T18:53:54,454 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64434c96{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T18:53:54,567 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5949a2aa{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/java.io.tmpdir/jetty-localhost-34075-hadoop-hdfs-3_4_1-tests_jar-_-any-9193841827127147274/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:53:54,568 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4958e5b2{HTTP/1.1, (http/1.1)}{localhost:34075} 2024-11-24T18:53:54,568 INFO [Time-limited test {}] server.Server(415): Started @163920ms 2024-11-24T18:53:54,569 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T18:53:55,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:53:55,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:53:55,665 WARN [Thread-1201 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/cluster_c7dc029b-5c53-012f-f1b6-357157c49622/data/data1/current/BP-1026925338-172.17.0.2-1732474433586/current, will proceed with Du for space computation calculation, 2024-11-24T18:53:55,666 WARN [Thread-1202 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/cluster_c7dc029b-5c53-012f-f1b6-357157c49622/data/data2/current/BP-1026925338-172.17.0.2-1732474433586/current, will proceed with Du for space computation calculation, 2024-11-24T18:53:55,685 WARN [Thread-1165 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T18:53:55,687 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8d30fae1cceb93b4 with lease ID 0x57567d8a483d7f7e: Processing first storage report for DS-d0cc9942-eed6-414e-ae88-9ba9bf2fa8aa from datanode DatanodeRegistration(127.0.0.1:45717, datanodeUuid=54fb0458-8a6c-4eb6-ba27-59e0846caccf, infoPort=39711, infoSecurePort=0, ipcPort=42085, storageInfo=lv=-57;cid=testClusterID;nsid=1550467677;c=1732474433586) 2024-11-24T18:53:55,688 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8d30fae1cceb93b4 with lease ID 0x57567d8a483d7f7e: from storage DS-d0cc9942-eed6-414e-ae88-9ba9bf2fa8aa node DatanodeRegistration(127.0.0.1:45717, datanodeUuid=54fb0458-8a6c-4eb6-ba27-59e0846caccf, infoPort=39711, infoSecurePort=0, ipcPort=42085, storageInfo=lv=-57;cid=testClusterID;nsid=1550467677;c=1732474433586), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:53:55,688 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8d30fae1cceb93b4 with lease ID 0x57567d8a483d7f7e: Processing first storage report for DS-3b6f802e-9ac7-4a38-91a1-643df6cd5d8f from datanode DatanodeRegistration(127.0.0.1:45717, datanodeUuid=54fb0458-8a6c-4eb6-ba27-59e0846caccf, infoPort=39711, infoSecurePort=0, ipcPort=42085, storageInfo=lv=-57;cid=testClusterID;nsid=1550467677;c=1732474433586) 2024-11-24T18:53:55,688 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8d30fae1cceb93b4 with lease ID 0x57567d8a483d7f7e: from storage DS-3b6f802e-9ac7-4a38-91a1-643df6cd5d8f node DatanodeRegistration(127.0.0.1:45717, datanodeUuid=54fb0458-8a6c-4eb6-ba27-59e0846caccf, infoPort=39711, infoSecurePort=0, ipcPort=42085, storageInfo=lv=-57;cid=testClusterID;nsid=1550467677;c=1732474433586), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:53:55,792 WARN [Thread-1212 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/cluster_c7dc029b-5c53-012f-f1b6-357157c49622/data/data3/current/BP-1026925338-172.17.0.2-1732474433586/current, will proceed with Du for space computation calculation, 2024-11-24T18:53:55,792 WARN [Thread-1213 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/cluster_c7dc029b-5c53-012f-f1b6-357157c49622/data/data4/current/BP-1026925338-172.17.0.2-1732474433586/current, will proceed with Du for space computation calculation, 2024-11-24T18:53:55,813 WARN [Thread-1188 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T18:53:55,816 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc692906440a6b41 with lease ID 0x57567d8a483d7f7f: Processing first storage report for DS-52b4e86e-459b-4be2-811d-1ba9b4d7650a from datanode DatanodeRegistration(127.0.0.1:39377, datanodeUuid=e83292c8-aad5-41af-a651-d4bb377cfa6a, infoPort=37303, infoSecurePort=0, ipcPort=46325, storageInfo=lv=-57;cid=testClusterID;nsid=1550467677;c=1732474433586) 2024-11-24T18:53:55,816 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc692906440a6b41 with lease ID 0x57567d8a483d7f7f: from storage DS-52b4e86e-459b-4be2-811d-1ba9b4d7650a node DatanodeRegistration(127.0.0.1:39377, datanodeUuid=e83292c8-aad5-41af-a651-d4bb377cfa6a, infoPort=37303, infoSecurePort=0, ipcPort=46325, storageInfo=lv=-57;cid=testClusterID;nsid=1550467677;c=1732474433586), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:53:55,816 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc692906440a6b41 with lease ID 0x57567d8a483d7f7f: Processing first storage report for DS-7565b014-fb88-49e0-9c34-ea69daa8f9d3 from datanode DatanodeRegistration(127.0.0.1:39377, datanodeUuid=e83292c8-aad5-41af-a651-d4bb377cfa6a, infoPort=37303, infoSecurePort=0, ipcPort=46325, storageInfo=lv=-57;cid=testClusterID;nsid=1550467677;c=1732474433586) 2024-11-24T18:53:55,816 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc692906440a6b41 with lease ID 0x57567d8a483d7f7f: from storage DS-7565b014-fb88-49e0-9c34-ea69daa8f9d3 node DatanodeRegistration(127.0.0.1:39377, datanodeUuid=e83292c8-aad5-41af-a651-d4bb377cfa6a, infoPort=37303, infoSecurePort=0, ipcPort=46325, storageInfo=lv=-57;cid=testClusterID;nsid=1550467677;c=1732474433586), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:53:55,903 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08 2024-11-24T18:53:55,928 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/cluster_c7dc029b-5c53-012f-f1b6-357157c49622/zookeeper_0, clientPort=59413, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/cluster_c7dc029b-5c53-012f-f1b6-357157c49622/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/cluster_c7dc029b-5c53-012f-f1b6-357157c49622/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T18:53:55,930 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59413 2024-11-24T18:53:55,930 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:53:55,932 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:53:55,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39377 is added to blk_1073741825_1001 (size=7) 2024-11-24T18:53:55,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45717 is added to blk_1073741825_1001 (size=7) 2024-11-24T18:53:55,948 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff with version=8 2024-11-24T18:53:55,948 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/hbase-staging 2024-11-24T18:53:55,951 INFO [Time-limited test {}] client.ConnectionUtils(128): master/f2b92657890a:0 server-side Connection retries=45 2024-11-24T18:53:55,951 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:53:55,951 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T18:53:55,951 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T18:53:55,951 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:53:55,951 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T18:53:55,951 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T18:53:55,951 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T18:53:55,952 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39263 2024-11-24T18:53:55,954 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39263 connecting to ZooKeeper ensemble=127.0.0.1:59413 2024-11-24T18:53:56,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:392630x0, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-24T18:53:56,011 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39263-0x1016e31f5020000 connected 2024-11-24T18:53:56,093 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:53:56,096 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:53:56,101 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T18:53:56,101 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff, hbase.cluster.distributed=false 2024-11-24T18:53:56,103 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T18:53:56,104 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39263 2024-11-24T18:53:56,104 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39263 2024-11-24T18:53:56,104 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39263 2024-11-24T18:53:56,107 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39263 2024-11-24T18:53:56,107 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39263 2024-11-24T18:53:56,123 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/f2b92657890a:0 server-side Connection retries=45 2024-11-24T18:53:56,123 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:53:56,123 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T18:53:56,123 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T18:53:56,123 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:53:56,123 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T18:53:56,123 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T18:53:56,123 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T18:53:56,124 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40185 2024-11-24T18:53:56,125 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40185 connecting to ZooKeeper ensemble=127.0.0.1:59413 2024-11-24T18:53:56,126 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:53:56,127 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:53:56,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:401850x0, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T18:53:56,135 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:401850x0, quorum=127.0.0.1:59413, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T18:53:56,135 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40185-0x1016e31f5020001 connected 2024-11-24T18:53:56,135 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T18:53:56,136 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T18:53:56,136 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40185-0x1016e31f5020001, quorum=127.0.0.1:59413, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T18:53:56,137 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40185-0x1016e31f5020001, quorum=127.0.0.1:59413, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T18:53:56,137 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40185 2024-11-24T18:53:56,140 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40185 2024-11-24T18:53:56,140 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40185 2024-11-24T18:53:56,141 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40185 2024-11-24T18:53:56,141 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40185 2024-11-24T18:53:56,152 DEBUG [M:0;f2b92657890a:39263 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;f2b92657890a:39263 2024-11-24T18:53:56,153 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/f2b92657890a,39263,1732474435950 2024-11-24T18:53:56,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40185-0x1016e31f5020001, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-24T18:53:56,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T18:53:56,166 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/f2b92657890a,39263,1732474435950 2024-11-24T18:53:56,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40185-0x1016e31f5020001, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T18:53:56,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:56,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40185-0x1016e31f5020001, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:56,177 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T18:53:56,177 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/f2b92657890a,39263,1732474435950 from backup master directory 2024-11-24T18:53:56,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/f2b92657890a,39263,1732474435950 2024-11-24T18:53:56,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40185-0x1016e31f5020001, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T18:53:56,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T18:53:56,187 WARN [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
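The startup sequence logged above (mini DFS, mini ZooKeeper, master and region server RPC servers, backup-master znode, active-master election) is what HBaseTestingUtil#startMiniCluster drives for this test. Below is a minimal, illustrative sketch of how a test requests that topology, assuming the builder method names simply mirror the option fields printed earlier (numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1); it is not the TestLogRolling source.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil testUtil = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)        // numMasters=1 in the logged option
        .numRegionServers(1)  // numRegionServers=1
        .numDataNodes(2)      // numDataNodes=2
        .numZkServers(1)      // numZkServers=1
        .build();
    testUtil.startMiniCluster(option);  // drives the DFS/ZK/master startup seen in this log
    try {
      // test body runs against the mini cluster here
    } finally {
      testUtil.shutdownMiniCluster();   // tears the whole topology down again
    }
  }
}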
2024-11-24T18:53:56,187 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=f2b92657890a,39263,1732474435950 2024-11-24T18:53:56,195 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/hbase.id] with ID: ef4714db-b7f0-43a2-b121-ba3941505581 2024-11-24T18:53:56,195 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/.tmp/hbase.id 2024-11-24T18:53:56,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39377 is added to blk_1073741826_1002 (size=42) 2024-11-24T18:53:56,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45717 is added to blk_1073741826_1002 (size=42) 2024-11-24T18:53:56,202 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/.tmp/hbase.id]:[hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/hbase.id] 2024-11-24T18:53:56,215 INFO [master/f2b92657890a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:53:56,215 INFO [master/f2b92657890a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T18:53:56,217 INFO [master/f2b92657890a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
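The FSUtils entries above show the master writing the cluster ID to .tmp/hbase.id and then moving it to hbase.id, a write-to-temporary-then-rename pattern so readers never observe a partially written file. The following is a minimal sketch of that pattern with the standard Hadoop FileSystem API, reusing the namenode address, paths, and cluster ID from the log; the class and method below are illustrative only, not the HBase FSUtils implementation.

import java.io.IOException;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdWriteSketch {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:45793"), new Configuration());
    Path tmp = new Path("/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/.tmp/hbase.id");
    Path dst = new Path("/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/hbase.id");
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      // cluster ID value taken from the "Create cluster ID file" log entry above
      out.write("ef4714db-b7f0-43a2-b121-ba3941505581".getBytes(StandardCharsets.UTF_8));
    }
    // rename within HDFS is atomic, so hbase.id shows up either fully written or not at all
    if (!fs.rename(tmp, dst)) {
      throw new IOException("Failed to rename " + tmp + " to " + dst);
    }
  }
}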
2024-11-24T18:53:56,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:56,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40185-0x1016e31f5020001, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:56,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45717 is added to blk_1073741827_1003 (size=196) 2024-11-24T18:53:56,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39377 is added to blk_1073741827_1003 (size=196) 2024-11-24T18:53:56,237 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T18:53:56,238 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T18:53:56,238 INFO [master/f2b92657890a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T18:53:56,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45717 is added to blk_1073741828_1004 (size=1189) 2024-11-24T18:53:56,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39377 is added to blk_1073741828_1004 (size=1189) 2024-11-24T18:53:56,246 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/data/master/store 2024-11-24T18:53:56,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45717 is added to blk_1073741829_1005 (size=34) 2024-11-24T18:53:56,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39377 is added to blk_1073741829_1005 (size=34) 2024-11-24T18:53:56,253 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:53:56,253 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T18:53:56,253 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:53:56,253 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:53:56,253 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T18:53:56,253 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:53:56,253 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
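The master:store descriptor printed above defines four column families (info, proc, rs, state) with per-family tuning. As a hedged illustration of how those logged attributes map onto the public client API, the sketch below rebuilds just the 'info' family (VERSIONS=3, BLOOMFILTER=ROWCOL, IN_MEMORY=true, DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOCKSIZE=8192) using ColumnFamilyDescriptorBuilder; it is not the MasterRegion source, only the standard builder calls.

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreInfoFamilySketch {
  public static void main(String[] args) {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                      // VERSIONS => '3'
        .setBloomFilterType(BloomType.ROWCOL)                   // BLOOMFILTER => 'ROWCOL'
        .setInMemory(true)                                      // IN_MEMORY => 'true'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)   // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
        .setBlocksize(8 * 1024)                                 // BLOCKSIZE => '8192 B (8KB)'
        .build();
    System.out.println(info);  // prints the family attributes, mirroring the logged descriptor
  }
}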
2024-11-24T18:53:56,253 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732474436253Disabling compacts and flushes for region at 1732474436253Disabling writes for close at 1732474436253Writing region close event to WAL at 1732474436253Closed at 1732474436253 2024-11-24T18:53:56,254 WARN [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/data/master/store/.initializing 2024-11-24T18:53:56,254 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/WALs/f2b92657890a,39263,1732474435950 2024-11-24T18:53:56,257 INFO [master/f2b92657890a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f2b92657890a%2C39263%2C1732474435950, suffix=, logDir=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/WALs/f2b92657890a,39263,1732474435950, archiveDir=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/oldWALs, maxLogs=10 2024-11-24T18:53:56,258 INFO [master/f2b92657890a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C39263%2C1732474435950.1732474436257 2024-11-24T18:53:56,263 INFO [master/f2b92657890a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/WALs/f2b92657890a,39263,1732474435950/f2b92657890a%2C39263%2C1732474435950.1732474436257 2024-11-24T18:53:56,264 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37303:37303),(127.0.0.1/127.0.0.1:39711:39711)] 2024-11-24T18:53:56,267 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T18:53:56,268 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:53:56,268 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:56,268 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:56,270 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:56,272 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T18:53:56,272 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:56,273 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:53:56,273 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:56,274 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T18:53:56,274 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:56,275 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T18:53:56,275 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:56,276 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T18:53:56,276 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:56,277 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T18:53:56,277 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:56,278 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T18:53:56,278 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:56,279 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T18:53:56,279 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:56,280 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:56,280 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:56,281 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:56,281 DEBUG [master/f2b92657890a:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:56,282 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T18:53:56,283 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:53:56,285 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T18:53:56,285 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=711241, jitterRate=-0.09561139345169067}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T18:53:56,286 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732474436268Initializing all the Stores at 1732474436269 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474436269Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474436270 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474436270Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474436270Cleaning up temporary data from old regions at 1732474436281 (+11 ms)Region opened successfully at 1732474436286 (+5 ms) 2024-11-24T18:53:56,288 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T18:53:56,291 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cb52aea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f2b92657890a/172.17.0.2:0 2024-11-24T18:53:56,292 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T18:53:56,292 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T18:53:56,293 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T18:53:56,293 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T18:53:56,293 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T18:53:56,294 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T18:53:56,294 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T18:53:56,296 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T18:53:56,296 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T18:53:56,303 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T18:53:56,303 INFO [master/f2b92657890a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T18:53:56,304 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T18:53:56,313 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T18:53:56,314 INFO [master/f2b92657890a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T18:53:56,315 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T18:53:56,324 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T18:53:56,325 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T18:53:56,395 WARN 
[Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:53:56,414 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T18:53:56,416 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T18:53:56,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:53:56,466 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T18:53:56,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T18:53:56,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40185-0x1016e31f5020001, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T18:53:56,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40185-0x1016e31f5020001, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:56,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:56,493 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=f2b92657890a,39263,1732474435950, sessionid=0x1016e31f5020000, setting cluster-up flag (Was=false) 2024-11-24T18:53:56,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:56,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40185-0x1016e31f5020001, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:56,545 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T18:53:56,548 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f2b92657890a,39263,1732474435950 2024-11-24T18:53:56,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40185-0x1016e31f5020001, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:56,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:56,597 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T18:53:56,600 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f2b92657890a,39263,1732474435950 2024-11-24T18:53:56,601 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T18:53:56,604 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T18:53:56,604 INFO [master/f2b92657890a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T18:53:56,604 INFO [master/f2b92657890a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-24T18:53:56,605 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: f2b92657890a,39263,1732474435950 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T18:53:56,607 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/f2b92657890a:0, corePoolSize=5, maxPoolSize=5 2024-11-24T18:53:56,607 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/f2b92657890a:0, corePoolSize=5, maxPoolSize=5 2024-11-24T18:53:56,607 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/f2b92657890a:0, corePoolSize=5, maxPoolSize=5 2024-11-24T18:53:56,607 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/f2b92657890a:0, corePoolSize=5, maxPoolSize=5 2024-11-24T18:53:56,607 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/f2b92657890a:0, corePoolSize=10, maxPoolSize=10 2024-11-24T18:53:56,607 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:56,607 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/f2b92657890a:0, corePoolSize=2, maxPoolSize=2 2024-11-24T18:53:56,607 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:56,608 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732474466608 2024-11-24T18:53:56,608 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T18:53:56,608 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T18:53:56,609 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T18:53:56,609 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T18:53:56,609 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T18:53:56,609 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T18:53:56,609 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:56,609 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T18:53:56,609 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T18:53:56,610 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T18:53:56,610 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T18:53:56,610 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T18:53:56,610 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T18:53:56,610 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T18:53:56,610 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.large.0-1732474436610,5,FailOnTimeoutGroup] 2024-11-24T18:53:56,611 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.small.0-1732474436610,5,FailOnTimeoutGroup] 2024-11-24T18:53:56,611 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:56,611 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T18:53:56,611 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:56,611 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:56,611 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-24T18:53:56,611 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T18:53:56,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39377 is added to blk_1073741831_1007 (size=1321) 2024-11-24T18:53:56,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45717 is added to blk_1073741831_1007 (size=1321) 2024-11-24T18:53:56,619 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T18:53:56,620 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff 2024-11-24T18:53:56,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45717 is added to blk_1073741832_1008 (size=32) 2024-11-24T18:53:56,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39377 is added to blk_1073741832_1008 (size=32) 2024-11-24T18:53:56,629 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:53:56,630 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T18:53:56,631 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T18:53:56,631 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:56,632 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:53:56,632 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T18:53:56,633 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T18:53:56,633 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:56,633 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:53:56,633 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T18:53:56,634 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T18:53:56,634 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:56,635 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:53:56,635 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T18:53:56,636 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T18:53:56,636 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:56,636 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:53:56,636 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T18:53:56,637 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/hbase/meta/1588230740 2024-11-24T18:53:56,637 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/hbase/meta/1588230740 2024-11-24T18:53:56,638 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T18:53:56,638 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T18:53:56,639 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T18:53:56,640 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T18:53:56,642 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T18:53:56,642 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=693026, jitterRate=-0.11877204477787018}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T18:53:56,643 INFO [RS:0;f2b92657890a:40185 {}] regionserver.HRegionServer(746): ClusterId : ef4714db-b7f0-43a2-b121-ba3941505581 2024-11-24T18:53:56,643 DEBUG [RS:0;f2b92657890a:40185 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T18:53:56,643 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732474436629Initializing all the Stores at 1732474436630 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474436630Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474436630Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474436630Instantiating store for column 
family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474436630Cleaning up temporary data from old regions at 1732474436638 (+8 ms)Region opened successfully at 1732474436643 (+5 ms) 2024-11-24T18:53:56,643 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T18:53:56,643 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T18:53:56,643 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T18:53:56,643 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T18:53:56,643 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T18:53:56,644 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T18:53:56,644 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732474436643Disabling compacts and flushes for region at 1732474436643Disabling writes for close at 1732474436643Writing region close event to WAL at 1732474436643Closed at 1732474436643 2024-11-24T18:53:56,645 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T18:53:56,645 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T18:53:56,645 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T18:53:56,646 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T18:53:56,647 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T18:53:56,651 DEBUG [RS:0;f2b92657890a:40185 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T18:53:56,651 DEBUG [RS:0;f2b92657890a:40185 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T18:53:56,661 DEBUG [RS:0;f2b92657890a:40185 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T18:53:56,662 DEBUG [RS:0;f2b92657890a:40185 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b4b96b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f2b92657890a/172.17.0.2:0 2024-11-24T18:53:56,672 DEBUG [RS:0;f2b92657890a:40185 {}] 
regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;f2b92657890a:40185 2024-11-24T18:53:56,672 INFO [RS:0;f2b92657890a:40185 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T18:53:56,672 INFO [RS:0;f2b92657890a:40185 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T18:53:56,672 DEBUG [RS:0;f2b92657890a:40185 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-24T18:53:56,673 INFO [RS:0;f2b92657890a:40185 {}] regionserver.HRegionServer(2659): reportForDuty to master=f2b92657890a,39263,1732474435950 with port=40185, startcode=1732474436122 2024-11-24T18:53:56,673 DEBUG [RS:0;f2b92657890a:40185 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T18:53:56,675 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58857, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T18:53:56,675 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39263 {}] master.ServerManager(363): Checking decommissioned status of RegionServer f2b92657890a,40185,1732474436122 2024-11-24T18:53:56,676 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39263 {}] master.ServerManager(517): Registering regionserver=f2b92657890a,40185,1732474436122 2024-11-24T18:53:56,677 DEBUG [RS:0;f2b92657890a:40185 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff 2024-11-24T18:53:56,677 DEBUG [RS:0;f2b92657890a:40185 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45793 2024-11-24T18:53:56,677 DEBUG [RS:0;f2b92657890a:40185 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T18:53:56,682 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T18:53:56,682 DEBUG [RS:0;f2b92657890a:40185 {}] zookeeper.ZKUtil(111): regionserver:40185-0x1016e31f5020001, quorum=127.0.0.1:59413, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/f2b92657890a,40185,1732474436122 2024-11-24T18:53:56,682 WARN [RS:0;f2b92657890a:40185 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-24T18:53:56,682 INFO [RS:0;f2b92657890a:40185 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T18:53:56,682 DEBUG [RS:0;f2b92657890a:40185 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122 2024-11-24T18:53:56,683 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [f2b92657890a,40185,1732474436122] 2024-11-24T18:53:56,686 INFO [RS:0;f2b92657890a:40185 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T18:53:56,688 INFO [RS:0;f2b92657890a:40185 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T18:53:56,688 INFO [RS:0;f2b92657890a:40185 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T18:53:56,688 INFO [RS:0;f2b92657890a:40185 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:56,689 INFO [RS:0;f2b92657890a:40185 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T18:53:56,690 INFO [RS:0;f2b92657890a:40185 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T18:53:56,690 INFO [RS:0;f2b92657890a:40185 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:56,690 DEBUG [RS:0;f2b92657890a:40185 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:56,690 DEBUG [RS:0;f2b92657890a:40185 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:56,690 DEBUG [RS:0;f2b92657890a:40185 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:56,690 DEBUG [RS:0;f2b92657890a:40185 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:56,690 DEBUG [RS:0;f2b92657890a:40185 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:56,690 DEBUG [RS:0;f2b92657890a:40185 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/f2b92657890a:0, corePoolSize=2, maxPoolSize=2 2024-11-24T18:53:56,690 DEBUG [RS:0;f2b92657890a:40185 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:56,691 DEBUG [RS:0;f2b92657890a:40185 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:56,691 DEBUG [RS:0;f2b92657890a:40185 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/f2b92657890a:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T18:53:56,691 DEBUG [RS:0;f2b92657890a:40185 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:56,691 DEBUG [RS:0;f2b92657890a:40185 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:56,691 DEBUG [RS:0;f2b92657890a:40185 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:53:56,691 DEBUG [RS:0;f2b92657890a:40185 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/f2b92657890a:0, corePoolSize=3, maxPoolSize=3 2024-11-24T18:53:56,691 DEBUG [RS:0;f2b92657890a:40185 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0, corePoolSize=3, maxPoolSize=3 2024-11-24T18:53:56,692 INFO [RS:0;f2b92657890a:40185 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:56,692 INFO [RS:0;f2b92657890a:40185 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:56,692 INFO [RS:0;f2b92657890a:40185 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:56,692 INFO [RS:0;f2b92657890a:40185 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:56,692 INFO [RS:0;f2b92657890a:40185 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:56,693 INFO [RS:0;f2b92657890a:40185 {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,40185,1732474436122-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T18:53:56,707 INFO [RS:0;f2b92657890a:40185 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T18:53:56,707 INFO [RS:0;f2b92657890a:40185 {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,40185,1732474436122-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:56,707 INFO [RS:0;f2b92657890a:40185 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:56,707 INFO [RS:0;f2b92657890a:40185 {}] regionserver.Replication(171): f2b92657890a,40185,1732474436122 started 2024-11-24T18:53:56,721 INFO [RS:0;f2b92657890a:40185 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T18:53:56,721 INFO [RS:0;f2b92657890a:40185 {}] regionserver.HRegionServer(1482): Serving as f2b92657890a,40185,1732474436122, RpcServer on f2b92657890a/172.17.0.2:40185, sessionid=0x1016e31f5020001 2024-11-24T18:53:56,721 DEBUG [RS:0;f2b92657890a:40185 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T18:53:56,721 DEBUG [RS:0;f2b92657890a:40185 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager f2b92657890a,40185,1732474436122 2024-11-24T18:53:56,721 DEBUG [RS:0;f2b92657890a:40185 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f2b92657890a,40185,1732474436122' 2024-11-24T18:53:56,721 DEBUG [RS:0;f2b92657890a:40185 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T18:53:56,722 DEBUG [RS:0;f2b92657890a:40185 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T18:53:56,722 DEBUG [RS:0;f2b92657890a:40185 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T18:53:56,722 DEBUG [RS:0;f2b92657890a:40185 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T18:53:56,722 DEBUG [RS:0;f2b92657890a:40185 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager f2b92657890a,40185,1732474436122 2024-11-24T18:53:56,722 DEBUG [RS:0;f2b92657890a:40185 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f2b92657890a,40185,1732474436122' 2024-11-24T18:53:56,722 DEBUG [RS:0;f2b92657890a:40185 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T18:53:56,723 DEBUG [RS:0;f2b92657890a:40185 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T18:53:56,723 DEBUG [RS:0;f2b92657890a:40185 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T18:53:56,723 INFO [RS:0;f2b92657890a:40185 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T18:53:56,723 INFO [RS:0;f2b92657890a:40185 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T18:53:56,797 WARN [f2b92657890a:39263 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-24T18:53:56,827 INFO [RS:0;f2b92657890a:40185 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f2b92657890a%2C40185%2C1732474436122, suffix=, logDir=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122, archiveDir=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/oldWALs, maxLogs=32 2024-11-24T18:53:56,828 INFO [RS:0;f2b92657890a:40185 {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C40185%2C1732474436122.1732474436828 2024-11-24T18:53:56,838 INFO [RS:0;f2b92657890a:40185 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474436828 2024-11-24T18:53:56,839 DEBUG [RS:0;f2b92657890a:40185 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39711:39711),(127.0.0.1/127.0.0.1:37303:37303)] 2024-11-24T18:53:57,048 DEBUG [f2b92657890a:39263 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T18:53:57,049 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=f2b92657890a,40185,1732474436122 2024-11-24T18:53:57,053 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f2b92657890a,40185,1732474436122, state=OPENING 2024-11-24T18:53:57,103 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T18:53:57,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40185-0x1016e31f5020001, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:57,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:53:57,114 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T18:53:57,114 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T18:53:57,115 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=f2b92657890a,40185,1732474436122}] 2024-11-24T18:53:57,115 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T18:53:57,271 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T18:53:57,275 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39925, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T18:53:57,281 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T18:53:57,281 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T18:53:57,285 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f2b92657890a%2C40185%2C1732474436122.meta, suffix=.meta, logDir=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122, archiveDir=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/oldWALs, maxLogs=32 2024-11-24T18:53:57,285 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C40185%2C1732474436122.meta.1732474437285.meta 2024-11-24T18:53:57,291 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.meta.1732474437285.meta 2024-11-24T18:53:57,293 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37303:37303),(127.0.0.1/127.0.0.1:39711:39711)] 2024-11-24T18:53:57,293 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T18:53:57,293 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T18:53:57,294 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T18:53:57,294 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
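The WAL configuration entries above (blocksize=256 MB, rollsize=128 MB, maxLogs=32, for both the regionserver WAL and the hbase:meta WAL) come from the cluster configuration. A minimal Java sketch of how such sizing could be set, assuming the key names hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier and hbase.regionserver.maxlogs (the key names are an assumption here, not taken from this log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative sketch only: rollsize is derived as blocksize * multiplier (256 MB * 0.5 = 128 MB).
public class WalRollSizing {
  public static Configuration make() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // assumed key: WAL block size, 256 MB
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // assumed key: roll at 50% of block size
    conf.setInt("hbase.regionserver.maxlogs", 32);                         // assumed key: matches maxLogs=32 above
    return conf;
  }
}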
2024-11-24T18:53:57,294 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T18:53:57,294 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:53:57,294 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T18:53:57,294 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T18:53:57,295 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T18:53:57,295 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T18:53:57,296 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:57,296 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:53:57,296 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T18:53:57,297 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T18:53:57,297 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:57,297 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:53:57,297 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T18:53:57,298 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T18:53:57,298 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:57,298 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:53:57,298 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T18:53:57,299 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T18:53:57,299 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:57,299 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-24T18:53:57,299 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T18:53:57,300 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/hbase/meta/1588230740 2024-11-24T18:53:57,301 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/hbase/meta/1588230740 2024-11-24T18:53:57,302 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T18:53:57,302 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T18:53:57,302 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T18:53:57,304 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T18:53:57,304 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=819178, jitterRate=0.041639283299446106}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T18:53:57,304 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T18:53:57,305 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732474437294Writing region info on filesystem at 1732474437294Initializing all the Stores at 1732474437295 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474437295Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474437295Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474437295Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474437295Cleaning up temporary data from old regions at 1732474437302 (+7 ms)Running coprocessor post-open hooks at 1732474437304 (+2 ms)Region opened successfully at 1732474437305 (+1 ms) 2024-11-24T18:53:57,306 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732474437270 2024-11-24T18:53:57,308 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T18:53:57,308 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T18:53:57,309 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=f2b92657890a,40185,1732474436122 2024-11-24T18:53:57,310 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f2b92657890a,40185,1732474436122, state=OPEN 2024-11-24T18:53:57,347 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T18:53:57,347 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40185-0x1016e31f5020001, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T18:53:57,347 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=f2b92657890a,40185,1732474436122 2024-11-24T18:53:57,347 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T18:53:57,347 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T18:53:57,352 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T18:53:57,352 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=f2b92657890a,40185,1732474436122 in 233 msec 2024-11-24T18:53:57,356 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T18:53:57,356 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 707 msec 2024-11-24T18:53:57,358 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T18:53:57,358 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T18:53:57,359 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T18:53:57,359 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f2b92657890a,40185,1732474436122, seqNum=-1] 2024-11-24T18:53:57,360 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T18:53:57,361 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36087, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T18:53:57,367 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 763 msec 2024-11-24T18:53:57,367 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732474437367, completionTime=-1 2024-11-24T18:53:57,368 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T18:53:57,368 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T18:53:57,369 INFO [master/f2b92657890a:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T18:53:57,369 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732474497369 2024-11-24T18:53:57,369 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732474557369 2024-11-24T18:53:57,369 INFO [master/f2b92657890a:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-24T18:53:57,370 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,39263,1732474435950-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:57,370 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,39263,1732474435950-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:57,370 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,39263,1732474435950-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:57,370 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-f2b92657890a:39263, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T18:53:57,370 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:57,370 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:57,372 DEBUG [master/f2b92657890a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T18:53:57,374 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.187sec 2024-11-24T18:53:57,374 INFO [master/f2b92657890a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T18:53:57,374 INFO [master/f2b92657890a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T18:53:57,374 INFO [master/f2b92657890a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T18:53:57,374 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T18:53:57,374 INFO [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T18:53:57,374 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,39263,1732474435950-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T18:53:57,374 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,39263,1732474435950-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T18:53:57,377 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T18:53:57,377 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T18:53:57,377 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,39263,1732474435950-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:53:57,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:53:57,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:53:57,444 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@542bdd12, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T18:53:57,445 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request f2b92657890a,39263,-1 for getting cluster id 2024-11-24T18:53:57,445 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T18:53:57,448 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ef4714db-b7f0-43a2-b121-ba3941505581' 2024-11-24T18:53:57,449 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T18:53:57,450 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ef4714db-b7f0-43a2-b121-ba3941505581" 2024-11-24T18:53:57,450 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66d037ca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T18:53:57,450 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f2b92657890a,39263,-1] 2024-11-24T18:53:57,451 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T18:53:57,451 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:53:57,454 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43020, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T18:53:57,455 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d428c33, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T18:53:57,456 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T18:53:57,457 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f2b92657890a,40185,1732474436122, seqNum=-1] 2024-11-24T18:53:57,458 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T18:53:57,461 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51384, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T18:53:57,463 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=f2b92657890a,39263,1732474435950 2024-11-24T18:53:57,464 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:53:57,468 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T18:53:57,468 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-24T18:53:57,468 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-24T18:53:57,468 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T18:53:57,469 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is f2b92657890a,39263,1732474435950 2024-11-24T18:53:57,470 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@68c52b95 2024-11-24T18:53:57,470 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T18:53:57,472 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43022, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T18:53:57,472 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39263 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-24T18:53:57,472 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39263 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
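The two TableDescriptorChecker warnings above quote the keys "hbase.hregion.max.filesize" (786432) and "hbase.hregion.memstore.flush.size" (8192) as deliberately tiny values. A minimal Java sketch, assuming those values are supplied through the cluster Configuration (the class name is illustrative, not the test's own code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative sketch: the small thresholds flagged in the WARN lines above,
// which provoke frequent flushes and early splits during a log-rolling test.
public class TinyRegionThresholds {
  public static Configuration make() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.max.filesize", 786432L);      // ~768 KB, as flagged above
    conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // 8 KB, as flagged above
    return conf;
  }
}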
2024-11-24T18:53:57,473 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39263 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T18:53:57,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39263 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-24T18:53:57,476 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T18:53:57,476 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:57,476 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39263 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-24T18:53:57,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39263 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T18:53:57,478 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T18:53:57,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39377 is added to blk_1073741835_1011 (size=395) 2024-11-24T18:53:57,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45717 is added to blk_1073741835_1011 (size=395) 2024-11-24T18:53:57,487 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 56770e6bab02c95da4526d53dd9ee5e7, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732474437472.56770e6bab02c95da4526d53dd9ee5e7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff 2024-11-24T18:53:57,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45717 is added to blk_1073741836_1012 (size=78) 2024-11-24T18:53:57,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39377 is added to blk_1073741836_1012 (size=78) 2024-11-24T18:53:57,494 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732474437472.56770e6bab02c95da4526d53dd9ee5e7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:53:57,494 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 56770e6bab02c95da4526d53dd9ee5e7, disabling compactions & flushes 2024-11-24T18:53:57,494 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732474437472.56770e6bab02c95da4526d53dd9ee5e7. 2024-11-24T18:53:57,494 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732474437472.56770e6bab02c95da4526d53dd9ee5e7. 2024-11-24T18:53:57,494 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732474437472.56770e6bab02c95da4526d53dd9ee5e7. after waiting 0 ms 2024-11-24T18:53:57,494 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732474437472.56770e6bab02c95da4526d53dd9ee5e7. 2024-11-24T18:53:57,494 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732474437472.56770e6bab02c95da4526d53dd9ee5e7. 2024-11-24T18:53:57,494 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 56770e6bab02c95da4526d53dd9ee5e7: Waiting for close lock at 1732474437494Disabling compacts and flushes for region at 1732474437494Disabling writes for close at 1732474437494Writing region close event to WAL at 1732474437494Closed at 1732474437494 2024-11-24T18:53:57,496 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T18:53:57,496 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1732474437472.56770e6bab02c95da4526d53dd9ee5e7.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1732474437496"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732474437496"}]},"ts":"1732474437496"} 2024-11-24T18:53:57,498 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
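The create request logged above carries a single 'info' family with VERSIONS => '1' and otherwise default attributes. A minimal client-side sketch of a request with the same shape, using the public Admin API (an illustration under those assumptions, not the test's own code):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative sketch of a createTable call matching the logged descriptor.
public class CreateTestTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      admin.createTable(
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
                  .build())
              .build());
    }
  }
}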
2024-11-24T18:53:57,500 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T18:53:57,500 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732474437500"}]},"ts":"1732474437500"} 2024-11-24T18:53:57,502 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-24T18:53:57,502 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=56770e6bab02c95da4526d53dd9ee5e7, ASSIGN}] 2024-11-24T18:53:57,504 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=56770e6bab02c95da4526d53dd9ee5e7, ASSIGN 2024-11-24T18:53:57,505 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=56770e6bab02c95da4526d53dd9ee5e7, ASSIGN; state=OFFLINE, location=f2b92657890a,40185,1732474436122; forceNewPlan=false, retain=false 2024-11-24T18:53:57,656 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=56770e6bab02c95da4526d53dd9ee5e7, regionState=OPENING, regionLocation=f2b92657890a,40185,1732474436122 2024-11-24T18:53:57,662 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=56770e6bab02c95da4526d53dd9ee5e7, ASSIGN because future has completed 2024-11-24T18:53:57,664 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 56770e6bab02c95da4526d53dd9ee5e7, server=f2b92657890a,40185,1732474436122}] 2024-11-24T18:53:57,828 INFO [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1732474437472.56770e6bab02c95da4526d53dd9ee5e7. 
2024-11-24T18:53:57,828 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 56770e6bab02c95da4526d53dd9ee5e7, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732474437472.56770e6bab02c95da4526d53dd9ee5e7.', STARTKEY => '', ENDKEY => ''} 2024-11-24T18:53:57,829 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 56770e6bab02c95da4526d53dd9ee5e7 2024-11-24T18:53:57,829 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732474437472.56770e6bab02c95da4526d53dd9ee5e7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:53:57,830 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 56770e6bab02c95da4526d53dd9ee5e7 2024-11-24T18:53:57,830 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 56770e6bab02c95da4526d53dd9ee5e7 2024-11-24T18:53:57,832 INFO [StoreOpener-56770e6bab02c95da4526d53dd9ee5e7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 56770e6bab02c95da4526d53dd9ee5e7 2024-11-24T18:53:57,835 INFO [StoreOpener-56770e6bab02c95da4526d53dd9ee5e7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 56770e6bab02c95da4526d53dd9ee5e7 columnFamilyName info 2024-11-24T18:53:57,835 DEBUG [StoreOpener-56770e6bab02c95da4526d53dd9ee5e7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:53:57,835 INFO [StoreOpener-56770e6bab02c95da4526d53dd9ee5e7-1 {}] regionserver.HStore(327): Store=56770e6bab02c95da4526d53dd9ee5e7/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T18:53:57,836 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 56770e6bab02c95da4526d53dd9ee5e7 2024-11-24T18:53:57,837 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/default/TestLogRolling-testLogRollOnPipelineRestart/56770e6bab02c95da4526d53dd9ee5e7 2024-11-24T18:53:57,837 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/default/TestLogRolling-testLogRollOnPipelineRestart/56770e6bab02c95da4526d53dd9ee5e7 2024-11-24T18:53:57,838 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 56770e6bab02c95da4526d53dd9ee5e7 2024-11-24T18:53:57,838 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 56770e6bab02c95da4526d53dd9ee5e7 2024-11-24T18:53:57,841 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 56770e6bab02c95da4526d53dd9ee5e7 2024-11-24T18:53:57,844 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/default/TestLogRolling-testLogRollOnPipelineRestart/56770e6bab02c95da4526d53dd9ee5e7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T18:53:57,845 INFO [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 56770e6bab02c95da4526d53dd9ee5e7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=794504, jitterRate=0.010264277458190918}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T18:53:57,845 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 56770e6bab02c95da4526d53dd9ee5e7 2024-11-24T18:53:57,846 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 56770e6bab02c95da4526d53dd9ee5e7: Running coprocessor pre-open hook at 1732474437830Writing region info on filesystem at 1732474437830Initializing all the Stores at 1732474437832 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474437832Cleaning up temporary data from old regions at 1732474437838 (+6 ms)Running coprocessor post-open hooks at 1732474437845 (+7 ms)Region opened successfully at 1732474437845 2024-11-24T18:53:57,847 INFO [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1732474437472.56770e6bab02c95da4526d53dd9ee5e7., pid=6, masterSystemTime=1732474437818 2024-11-24T18:53:57,849 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1732474437472.56770e6bab02c95da4526d53dd9ee5e7. 2024-11-24T18:53:57,849 INFO [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1732474437472.56770e6bab02c95da4526d53dd9ee5e7. 2024-11-24T18:53:57,850 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=56770e6bab02c95da4526d53dd9ee5e7, regionState=OPEN, openSeqNum=2, regionLocation=f2b92657890a,40185,1732474436122 2024-11-24T18:53:57,852 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 56770e6bab02c95da4526d53dd9ee5e7, server=f2b92657890a,40185,1732474436122 because future has completed 2024-11-24T18:53:57,856 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T18:53:57,856 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 56770e6bab02c95da4526d53dd9ee5e7, server=f2b92657890a,40185,1732474436122 in 190 msec 2024-11-24T18:53:57,859 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T18:53:57,859 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=56770e6bab02c95da4526d53dd9ee5e7, ASSIGN in 354 msec 2024-11-24T18:53:57,860 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T18:53:57,860 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732474437860"}]},"ts":"1732474437860"} 2024-11-24T18:53:57,862 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-24T18:53:57,863 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T18:53:57,865 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 390 msec 2024-11-24T18:53:58,328 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T18:53:58,328 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-24T18:53:58,330 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-24T18:53:58,330 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-24T18:53:58,332 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T18:53:58,332 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-24T18:53:58,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:53:58,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:53:59,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:53:59,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:00,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:00,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:01,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:01,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:02,318 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:02,318 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:02,318 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:02,319 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:02,319 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:02,319 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:02,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:02,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:02,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:02,328 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:02,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:02,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:02,833 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T18:54:02,864 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:02,865 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:02,865 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:02,865 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:02,866 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:02,866 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:02,870 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:02,870 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:02,871 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:02,874 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:02,878 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T18:54:02,878 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-24T18:54:03,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:03,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:04,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:04,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:05,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:05,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:06,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:06,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:07,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:07,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:54:07,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39263 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T18:54:07,574 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-24T18:54:07,574 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-24T18:54:07,579 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-24T18:54:07,579 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1732474437472.56770e6bab02c95da4526d53dd9ee5e7. 2024-11-24T18:54:07,584 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1732474437472.56770e6bab02c95da4526d53dd9ee5e7., hostname=f2b92657890a,40185,1732474436122, seqNum=2] 2024-11-24T18:54:08,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:08,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:09,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:09,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:09,588 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474436828 2024-11-24T18:54:09,588 WARN [ResponseProcessor for block BP-1026925338-172.17.0.2-1732474433586:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1026925338-172.17.0.2-1732474433586:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:54:09,588 WARN [ResponseProcessor for block BP-1026925338-172.17.0.2-1732474433586:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1026925338-172.17.0.2-1732474433586:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:54:09,589 WARN [DataStreamer for file /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/WALs/f2b92657890a,39263,1732474435950/f2b92657890a%2C39263%2C1732474435950.1732474436257 block BP-1026925338-172.17.0.2-1732474433586:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1026925338-172.17.0.2-1732474433586:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39377,DS-52b4e86e-459b-4be2-811d-1ba9b4d7650a,DISK], DatanodeInfoWithStorage[127.0.0.1:45717,DS-d0cc9942-eed6-414e-ae88-9ba9bf2fa8aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39377,DS-52b4e86e-459b-4be2-811d-1ba9b4d7650a,DISK]) is bad. 2024-11-24T18:54:09,590 WARN [ResponseProcessor for block BP-1026925338-172.17.0.2-1732474433586:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1026925338-172.17.0.2-1732474433586:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1026925338-172.17.0.2-1732474433586:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:39377,DS-52b4e86e-459b-4be2-811d-1ba9b4d7650a,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T18:54:09,590 WARN [DataStreamer for file /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.meta.1732474437285.meta block BP-1026925338-172.17.0.2-1732474433586:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1026925338-172.17.0.2-1732474433586:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39377,DS-52b4e86e-459b-4be2-811d-1ba9b4d7650a,DISK], DatanodeInfoWithStorage[127.0.0.1:45717,DS-d0cc9942-eed6-414e-ae88-9ba9bf2fa8aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39377,DS-52b4e86e-459b-4be2-811d-1ba9b4d7650a,DISK]) is bad. 2024-11-24T18:54:09,590 WARN [DataStreamer for file /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474436828 block BP-1026925338-172.17.0.2-1732474433586:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1026925338-172.17.0.2-1732474433586:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45717,DS-d0cc9942-eed6-414e-ae88-9ba9bf2fa8aa,DISK], DatanodeInfoWithStorage[127.0.0.1:39377,DS-52b4e86e-459b-4be2-811d-1ba9b4d7650a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39377,DS-52b4e86e-459b-4be2-811d-1ba9b4d7650a,DISK]) is bad. 2024-11-24T18:54:09,590 WARN [PacketResponder: BP-1026925338-172.17.0.2-1732474433586:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:39377] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T18:54:09,591 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_381802646_22 at /127.0.0.1:48386 [Receiving block BP-1026925338-172.17.0.2-1732474433586:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:45717:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48386 dst: /127.0.0.1:45717 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:54:09,591 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_381802646_22 at /127.0.0.1:59620 [Receiving block BP-1026925338-172.17.0.2-1732474433586:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39377:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59620 dst: /127.0.0.1:39377 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:54:09,591 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_471649652_22 at /127.0.0.1:59588 [Receiving block BP-1026925338-172.17.0.2-1732474433586:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39377:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59588 dst: /127.0.0.1:39377 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T18:54:09,591 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_381802646_22 at /127.0.0.1:48390 [Receiving block BP-1026925338-172.17.0.2-1732474433586:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:45717:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48390 dst: /127.0.0.1:45717 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:54:09,592 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_471649652_22 at /127.0.0.1:48366 [Receiving block BP-1026925338-172.17.0.2-1732474433586:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:45717:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48366 dst: /127.0.0.1:45717 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T18:54:09,592 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_381802646_22 at /127.0.0.1:59636 [Receiving block BP-1026925338-172.17.0.2-1732474433586:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39377:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59636 dst: /127.0.0.1:39377 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T18:54:09,660 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5949a2aa{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:54:09,661 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4958e5b2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T18:54:09,661 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T18:54:09,661 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64434c96{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T18:54:09,661 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59ddfe6e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/hadoop.log.dir/,STOPPED} 2024-11-24T18:54:09,665 WARN [BP-1026925338-172.17.0.2-1732474433586 heartbeating to localhost/127.0.0.1:45793 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T18:54:09,665 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T18:54:09,665 WARN [BP-1026925338-172.17.0.2-1732474433586 heartbeating to localhost/127.0.0.1:45793 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1026925338-172.17.0.2-1732474433586 (Datanode Uuid e83292c8-aad5-41af-a651-d4bb377cfa6a) service to localhost/127.0.0.1:45793 2024-11-24T18:54:09,665 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T18:54:09,666 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/cluster_c7dc029b-5c53-012f-f1b6-357157c49622/data/data3/current/BP-1026925338-172.17.0.2-1732474433586 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:54:09,666 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/cluster_c7dc029b-5c53-012f-f1b6-357157c49622/data/data4/current/BP-1026925338-172.17.0.2-1732474433586 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:54:09,666 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T18:54:09,681 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:54:09,686 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T18:54:09,688 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T18:54:09,688 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T18:54:09,688 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T18:54:09,691 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62802d30{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/hadoop.log.dir/,AVAILABLE} 2024-11-24T18:54:09,691 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2460467f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T18:54:09,817 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@443f9e40{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/java.io.tmpdir/jetty-localhost-46583-hadoop-hdfs-3_4_1-tests_jar-_-any-14791209205518909839/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:54:09,818 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6ff7fcbc{HTTP/1.1, (http/1.1)}{localhost:46583} 2024-11-24T18:54:09,818 INFO [Time-limited test {}] server.Server(415): Started @179170ms 2024-11-24T18:54:09,820 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T18:54:09,852 WARN [ResponseProcessor for block BP-1026925338-172.17.0.2-1732474433586:blk_1073741833_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1026925338-172.17.0.2-1732474433586:blk_1073741833_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:54:09,852 WARN [ResponseProcessor for block BP-1026925338-172.17.0.2-1732474433586:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1026925338-172.17.0.2-1732474433586:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:54:09,853 WARN [ResponseProcessor for block BP-1026925338-172.17.0.2-1732474433586:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1026925338-172.17.0.2-1732474433586:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:54:09,853 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_381802646_22 at /127.0.0.1:46244 [Receiving block BP-1026925338-172.17.0.2-1732474433586:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:45717:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46244 dst: /127.0.0.1:45717 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T18:54:09,853 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_381802646_22 at /127.0.0.1:46246 [Receiving block BP-1026925338-172.17.0.2-1732474433586:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:45717:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46246 dst: /127.0.0.1:45717 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:54:09,854 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_471649652_22 at /127.0.0.1:46228 [Receiving block BP-1026925338-172.17.0.2-1732474433586:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:45717:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46228 dst: /127.0.0.1:45717 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:54:09,860 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6b8d1f8b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:54:09,860 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@62bbed65{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T18:54:09,860 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T18:54:09,860 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1fc8bed8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T18:54:09,860 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3bfb2c17{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/hadoop.log.dir/,STOPPED} 2024-11-24T18:54:09,861 WARN [BP-1026925338-172.17.0.2-1732474433586 heartbeating to localhost/127.0.0.1:45793 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T18:54:09,861 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T18:54:09,861 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T18:54:09,861 WARN [BP-1026925338-172.17.0.2-1732474433586 heartbeating to localhost/127.0.0.1:45793 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1026925338-172.17.0.2-1732474433586 (Datanode Uuid 54fb0458-8a6c-4eb6-ba27-59e0846caccf) service to localhost/127.0.0.1:45793 2024-11-24T18:54:09,862 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/cluster_c7dc029b-5c53-012f-f1b6-357157c49622/data/data1/current/BP-1026925338-172.17.0.2-1732474433586 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:54:09,862 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/cluster_c7dc029b-5c53-012f-f1b6-357157c49622/data/data2/current/BP-1026925338-172.17.0.2-1732474433586 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:54:09,862 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T18:54:09,872 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:54:09,875 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T18:54:09,876 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T18:54:09,876 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T18:54:09,876 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T18:54:09,877 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6baabd83{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/hadoop.log.dir/,AVAILABLE} 2024-11-24T18:54:09,877 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75ed142f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T18:54:10,017 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@39f8899d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/java.io.tmpdir/jetty-localhost-44331-hadoop-hdfs-3_4_1-tests_jar-_-any-13174338629968858473/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:54:10,018 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7a441552{HTTP/1.1, 
(http/1.1)}{localhost:44331} 2024-11-24T18:54:10,018 INFO [Time-limited test {}] server.Server(415): Started @179370ms 2024-11-24T18:54:10,020 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T18:54:10,386 WARN [Thread-1335 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T18:54:10,388 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x39d652668d4157d3 with lease ID 0x57567d8a483d7f80: from storage DS-52b4e86e-459b-4be2-811d-1ba9b4d7650a node DatanodeRegistration(127.0.0.1:39431, datanodeUuid=e83292c8-aad5-41af-a651-d4bb377cfa6a, infoPort=43251, infoSecurePort=0, ipcPort=45011, storageInfo=lv=-57;cid=testClusterID;nsid=1550467677;c=1732474433586), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:54:10,389 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x39d652668d4157d3 with lease ID 0x57567d8a483d7f80: from storage DS-7565b014-fb88-49e0-9c34-ea69daa8f9d3 node DatanodeRegistration(127.0.0.1:39431, datanodeUuid=e83292c8-aad5-41af-a651-d4bb377cfa6a, infoPort=43251, infoSecurePort=0, ipcPort=45011, storageInfo=lv=-57;cid=testClusterID;nsid=1550467677;c=1732474433586), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:54:10,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:10,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:10,660 WARN [Thread-1355 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T18:54:10,662 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc40c111bfbbd949d with lease ID 0x57567d8a483d7f81: from storage DS-d0cc9942-eed6-414e-ae88-9ba9bf2fa8aa node DatanodeRegistration(127.0.0.1:33567, datanodeUuid=54fb0458-8a6c-4eb6-ba27-59e0846caccf, infoPort=40323, infoSecurePort=0, ipcPort=43393, storageInfo=lv=-57;cid=testClusterID;nsid=1550467677;c=1732474433586), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:54:10,662 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc40c111bfbbd949d with lease ID 0x57567d8a483d7f81: from storage DS-3b6f802e-9ac7-4a38-91a1-643df6cd5d8f node DatanodeRegistration(127.0.0.1:33567, datanodeUuid=54fb0458-8a6c-4eb6-ba27-59e0846caccf, infoPort=40323, infoSecurePort=0, ipcPort=43393, storageInfo=lv=-57;cid=testClusterID;nsid=1550467677;c=1732474433586), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:54:11,056 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-24T18:54:11,059 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-24T18:54:11,060 ERROR [FSHLog-0-hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff-prefix:f2b92657890a,40185,1732474436122 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45717,DS-d0cc9942-eed6-414e-ae88-9ba9bf2fa8aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:54:11,061 WARN [FSHLog-0-hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff-prefix:f2b92657890a,40185,1732474436122 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45717,DS-d0cc9942-eed6-414e-ae88-9ba9bf2fa8aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T18:54:11,061 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f2b92657890a%2C40185%2C1732474436122:(num 1732474436828) roll requested 2024-11-24T18:54:11,061 INFO [regionserver/f2b92657890a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C40185%2C1732474436122.1732474451061 2024-11-24T18:54:11,068 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474436828 newFile=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474451061 2024-11-24T18:54:11,068 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:11,068 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:11,068 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:11,068 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:11,068 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:11,068 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474436828 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474451061 2024-11-24T18:54:11,069 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45717,DS-d0cc9942-eed6-414e-ae88-9ba9bf2fa8aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:54:11,069 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45717,DS-d0cc9942-eed6-414e-ae88-9ba9bf2fa8aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T18:54:11,069 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474436828 2024-11-24T18:54:11,070 WARN [IPC Server handler 4 on default port 45793 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474436828 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1015 2024-11-24T18:54:11,070 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474436828 after 1ms 2024-11-24T18:54:11,072 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40323:40323),(127.0.0.1/127.0.0.1:43251:43251)] 2024-11-24T18:54:11,072 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474436828 is not closed yet, will try archiving it next time 2024-11-24T18:54:11,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:11,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:12,389 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1015: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-24T18:54:12,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:12,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:13,077 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-24T18:54:13,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:13,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:14,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:14,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:15,071 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474436828 after 4002ms 2024-11-24T18:54:15,082 WARN [ResponseProcessor for block BP-1026925338-172.17.0.2-1732474433586:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1026925338-172.17.0.2-1732474433586:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:54:15,083 WARN [DataStreamer for file /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474451061 block BP-1026925338-172.17.0.2-1732474433586:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1026925338-172.17.0.2-1732474433586:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33567,DS-d0cc9942-eed6-414e-ae88-9ba9bf2fa8aa,DISK], DatanodeInfoWithStorage[127.0.0.1:39431,DS-52b4e86e-459b-4be2-811d-1ba9b4d7650a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33567,DS-d0cc9942-eed6-414e-ae88-9ba9bf2fa8aa,DISK]) is bad. 2024-11-24T18:54:15,083 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_381802646_22 at /127.0.0.1:56878 [Receiving block BP-1026925338-172.17.0.2-1732474433586:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:33567:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56878 dst: /127.0.0.1:33567 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:54:15,084 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_381802646_22 at /127.0.0.1:57966 [Receiving block BP-1026925338-172.17.0.2-1732474433586:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:39431:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57966 dst: /127.0.0.1:39431 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
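The ClosedChannelException / "Premature EOF" DataXceiver errors and the stopped datanode webapp below reflect the test bouncing the mini-cluster datanodes underneath the open WAL; a rough sketch of that fault injection, assuming the usual MiniDFSCluster stop/restart calls (the cluster handle and index are placeholders):

    import org.apache.hadoop.hdfs.MiniDFSCluster;

    class DataNodeBouncer {
      // Stop a datanode so the WAL pipeline reports "datanode ... is bad", then bring it
      // back on the same ports and wait for it to re-register with the namenode.
      static void bounce(MiniDFSCluster dfsCluster) throws Exception {
        var dn = dfsCluster.stopDataNode(0);
        // ... writes against the old pipeline now fail, forcing the WAL roll seen above ...
        dfsCluster.restartDataNode(dn, true); // true: keep the previous ports
        dfsCluster.waitActive();              // block until the datanode is serving again
      }
    }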
2024-11-24T18:54:15,085 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@39f8899d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:54:15,085 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7a441552{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T18:54:15,085 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T18:54:15,085 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75ed142f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T18:54:15,085 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6baabd83{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/hadoop.log.dir/,STOPPED} 2024-11-24T18:54:15,090 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T18:54:15,090 WARN [BP-1026925338-172.17.0.2-1732474433586 heartbeating to localhost/127.0.0.1:45793 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T18:54:15,090 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T18:54:15,090 WARN [BP-1026925338-172.17.0.2-1732474433586 heartbeating to localhost/127.0.0.1:45793 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1026925338-172.17.0.2-1732474433586 (Datanode Uuid 54fb0458-8a6c-4eb6-ba27-59e0846caccf) service to localhost/127.0.0.1:45793 2024-11-24T18:54:15,090 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/cluster_c7dc029b-5c53-012f-f1b6-357157c49622/data/data1/current/BP-1026925338-172.17.0.2-1732474433586 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:54:15,091 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/cluster_c7dc029b-5c53-012f-f1b6-357157c49622/data/data2/current/BP-1026925338-172.17.0.2-1732474433586 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:54:15,091 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T18:54:15,102 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:54:15,106 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T18:54:15,107 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T18:54:15,107 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T18:54:15,107 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T18:54:15,108 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22abc971{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/hadoop.log.dir/,AVAILABLE} 2024-11-24T18:54:15,108 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19016e01{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T18:54:15,208 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4778d192{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/java.io.tmpdir/jetty-localhost-41203-hadoop-hdfs-3_4_1-tests_jar-_-any-1388582473631550588/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:54:15,209 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@76a8c1ee{HTTP/1.1, (http/1.1)}{localhost:41203} 2024-11-24T18:54:15,209 INFO [Time-limited test {}] server.Server(415): Started @184561ms 2024-11-24T18:54:15,210 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T18:54:15,249 WARN [ResponseProcessor for block BP-1026925338-172.17.0.2-1732474433586:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1026925338-172.17.0.2-1732474433586:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:54:15,250 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_381802646_22 at /127.0.0.1:34316 [Receiving block BP-1026925338-172.17.0.2-1732474433586:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:39431:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34316 dst: /127.0.0.1:39431 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:54:15,253 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@443f9e40{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:54:15,253 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6ff7fcbc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T18:54:15,253 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T18:54:15,253 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2460467f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T18:54:15,253 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62802d30{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/hadoop.log.dir/,STOPPED} 2024-11-24T18:54:15,255 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T18:54:15,255 WARN [BP-1026925338-172.17.0.2-1732474433586 heartbeating to localhost/127.0.0.1:45793 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T18:54:15,255 WARN [BP-1026925338-172.17.0.2-1732474433586 heartbeating to localhost/127.0.0.1:45793 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1026925338-172.17.0.2-1732474433586 (Datanode Uuid e83292c8-aad5-41af-a651-d4bb377cfa6a) service to localhost/127.0.0.1:45793 2024-11-24T18:54:15,255 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T18:54:15,255 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/cluster_c7dc029b-5c53-012f-f1b6-357157c49622/data/data3/current/BP-1026925338-172.17.0.2-1732474433586 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:54:15,255 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/cluster_c7dc029b-5c53-012f-f1b6-357157c49622/data/data4/current/BP-1026925338-172.17.0.2-1732474433586 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:54:15,256 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T18:54:15,267 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:54:15,272 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T18:54:15,274 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T18:54:15,274 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T18:54:15,274 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T18:54:15,275 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3df3f65e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/hadoop.log.dir/,AVAILABLE} 2024-11-24T18:54:15,275 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c39138a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T18:54:15,384 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5b16a6e2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/java.io.tmpdir/jetty-localhost-42193-hadoop-hdfs-3_4_1-tests_jar-_-any-11674619569057578801/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:54:15,384 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@27b0e5fe{HTTP/1.1, (http/1.1)}{localhost:42193} 2024-11-24T18:54:15,384 INFO [Time-limited test {}] server.Server(415): Started @184737ms 2024-11-24T18:54:15,386 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T18:54:15,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:15,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:15,843 WARN [Thread-1409 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T18:54:15,845 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x393c77a4161a21c with lease ID 0x57567d8a483d7f82: from storage DS-d0cc9942-eed6-414e-ae88-9ba9bf2fa8aa node DatanodeRegistration(127.0.0.1:43525, datanodeUuid=54fb0458-8a6c-4eb6-ba27-59e0846caccf, infoPort=39835, infoSecurePort=0, ipcPort=38853, storageInfo=lv=-57;cid=testClusterID;nsid=1550467677;c=1732474433586), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:54:15,851 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x393c77a4161a21c with lease ID 0x57567d8a483d7f82: from storage DS-3b6f802e-9ac7-4a38-91a1-643df6cd5d8f node DatanodeRegistration(127.0.0.1:43525, datanodeUuid=54fb0458-8a6c-4eb6-ba27-59e0846caccf, infoPort=39835, infoSecurePort=0, ipcPort=38853, storageInfo=lv=-57;cid=testClusterID;nsid=1550467677;c=1732474433586), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:54:15,990 WARN [Thread-1429 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T18:54:15,992 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x33dd6670e0b86a28 with lease ID 0x57567d8a483d7f83: from storage DS-52b4e86e-459b-4be2-811d-1ba9b4d7650a node DatanodeRegistration(127.0.0.1:33433, datanodeUuid=e83292c8-aad5-41af-a651-d4bb377cfa6a, infoPort=38605, infoSecurePort=0, ipcPort=45129, storageInfo=lv=-57;cid=testClusterID;nsid=1550467677;c=1732474433586), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:54:15,992 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x33dd6670e0b86a28 with lease ID 0x57567d8a483d7f83: from storage DS-7565b014-fb88-49e0-9c34-ea69daa8f9d3 node DatanodeRegistration(127.0.0.1:33433, datanodeUuid=e83292c8-aad5-41af-a651-d4bb377cfa6a, infoPort=38605, infoSecurePort=0, ipcPort=45129, storageInfo=lv=-57;cid=testClusterID;nsid=1550467677;c=1732474433586), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:54:16,406 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-24T18:54:16,408 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-24T18:54:16,409 ERROR [FSHLog-0-hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff-prefix:f2b92657890a,40185,1732474436122 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39431,DS-52b4e86e-459b-4be2-811d-1ba9b4d7650a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
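Between the restarts the test keeps checking that earlier edits remain readable (the "Validated row row1003" / "row1004" entries); a minimal sketch of such a check with the standard client API, with the column family and qualifier as placeholders rather than the test's real names:

    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    class RowValidator {
      // Read the row back after the datanodes return; if the WAL edit was lost the
      // value comes back null and the test should fail.
      static void validateRow(Table table, String row) throws Exception {
        Result r = table.get(new Get(Bytes.toBytes(row)));
        byte[] value = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("q"));
        if (value == null) {
          throw new AssertionError("row " + row + " missing after WAL roll");
        }
      }
    }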
2024-11-24T18:54:16,410 WARN [FSHLog-0-hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff-prefix:f2b92657890a,40185,1732474436122 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39431,DS-52b4e86e-459b-4be2-811d-1ba9b4d7650a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:54:16,410 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f2b92657890a%2C40185%2C1732474436122:(num 1732474451061) roll requested 2024-11-24T18:54:16,410 INFO [regionserver/f2b92657890a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C40185%2C1732474436122.1732474456410 2024-11-24T18:54:16,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:16,416 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474451061 newFile=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474456410 2024-11-24T18:54:16,416 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:16,416 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:16,416 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:16,416 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:16,416 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:16,416 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474451061 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474456410 2024-11-24T18:54:16,417 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39431,DS-52b4e86e-459b-4be2-811d-1ba9b4d7650a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:54:16,417 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39431,DS-52b4e86e-459b-4be2-811d-1ba9b4d7650a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
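The Close-WAL-Writer thread then has to recover the HDFS lease on the WAL it could not close; a rough sketch of the retry loop behind the RecoverLeaseFSUtils messages, using only the recoverLease/isFileClosed calls that appear in the stack traces (the real utility adds timeouts and progress reporting):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    class LeaseRecovery {
      // Ask the namenode to recover the lease, then poll until the old WAL is closed,
      // matching the "Failed to recover lease, attempt=0" / "Recovered lease, attempt=1" pair above.
      static void recoverLease(DistributedFileSystem dfs, Path oldWal) throws Exception {
        boolean recovered = dfs.recoverLease(oldWal);          // attempt=0
        while (!recovered) {
          Thread.sleep(1000);                                  // back off between attempts
          recovered = dfs.isFileClosed(oldWal) || dfs.recoverLease(oldWal);
        }
      }
    }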
2024-11-24T18:54:16,417 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474451061 2024-11-24T18:54:16,417 WARN [IPC Server handler 1 on default port 45793 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474451061 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-24T18:54:16,417 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474451061 after 0ms 2024-11-24T18:54:16,424 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38605:38605),(127.0.0.1/127.0.0.1:39835:39835)] 2024-11-24T18:54:16,424 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474451061 is not closed yet, will try archiving it next time 2024-11-24T18:54:16,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:17,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:17,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:18,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:18,426 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C40185%2C1732474436122.1732474458426 2024-11-24T18:54:18,435 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474456410 newFile=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474458426 2024-11-24T18:54:18,435 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:18,435 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:18,436 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:18,436 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:18,436 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:18,436 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474456410 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474458426 2024-11-24T18:54:18,439 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38605:38605),(127.0.0.1/127.0.0.1:39835:39835)] 2024-11-24T18:54:18,439 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474451061 is not closed yet, will try archiving it next time 2024-11-24T18:54:18,439 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474456410 is not closed yet, will try archiving it next time 2024-11-24T18:54:18,440 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474436828 2024-11-24T18:54:18,440 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474436828 2024-11-24T18:54:18,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43525 is added to blk_1073741838_1019 (size=1264) 2024-11-24T18:54:18,440 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33433 is added to blk_1073741838_1019 (size=1264) 2024-11-24T18:54:18,440 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474436828 after 0ms 2024-11-24T18:54:18,441 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474436828 2024-11-24T18:54:18,441 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474451061 is not closed yet, will try archiving it next time 2024-11-24T18:54:18,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:54:18,451 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1732474437846/Put/vlen=218/seqid=0] 2024-11-24T18:54:18,451 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1732474447586/Put/vlen=1045/seqid=0] 2024-11-24T18:54:18,451 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474436828 2024-11-24T18:54:18,451 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474451061 2024-11-24T18:54:18,451 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474451061 2024-11-24T18:54:18,452 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474451061 after 1ms 2024-11-24T18:54:18,452 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474451061 2024-11-24T18:54:18,458 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1732474451060/Put/vlen=1045/seqid=0] 2024-11-24T18:54:18,458 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1732474453079/Put/vlen=1045/seqid=0] 2024-11-24T18:54:18,458 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474451061 2024-11-24T18:54:18,458 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474456410 2024-11-24T18:54:18,458 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474456410 2024-11-24T18:54:18,459 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474456410 after 0ms 2024-11-24T18:54:18,459 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474456410 2024-11-24T18:54:18,462 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1732474456409/Put/vlen=1045/seqid=0] 2024-11-24T18:54:18,462 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for 
hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474458426 2024-11-24T18:54:18,463 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474458426 2024-11-24T18:54:18,463 WARN [IPC Server handler 0 on default port 45793 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474458426 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-24T18:54:18,464 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474458426 after 0ms 2024-11-24T18:54:18,995 WARN [ResponseProcessor for block BP-1026925338-172.17.0.2-1732474433586:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1026925338-172.17.0.2-1732474433586:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:54:18,995 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_471649652_22 at /127.0.0.1:49720 [Receiving block BP-1026925338-172.17.0.2-1732474433586:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:33433:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49720 dst: /127.0.0.1:33433 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:33433 remote=/127.0.0.1:49720]. Total timeout mills is 60000, 59439 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:54:18,995 WARN [DataStreamer for file /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474458426 block BP-1026925338-172.17.0.2-1732474433586:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1026925338-172.17.0.2-1732474433586:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33433,DS-52b4e86e-459b-4be2-811d-1ba9b4d7650a,DISK], DatanodeInfoWithStorage[127.0.0.1:43525,DS-d0cc9942-eed6-414e-ae88-9ba9bf2fa8aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33433,DS-52b4e86e-459b-4be2-811d-1ba9b4d7650a,DISK]) is bad. 2024-11-24T18:54:18,995 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_471649652_22 at /127.0.0.1:58034 [Receiving block BP-1026925338-172.17.0.2-1732474433586:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:43525:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58034 dst: /127.0.0.1:43525 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T18:54:18,997 WARN [DataStreamer for file /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474458426 block BP-1026925338-172.17.0.2-1732474433586:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1026925338-172.17.0.2-1732474433586:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T18:54:19,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33433 is added to blk_1073741839_1022 (size=85) 2024-11-24T18:54:19,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43525 is added to blk_1073741839_1022 (size=85) 2024-11-24T18:54:19,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:19,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:20,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:20,419 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474451061 after 4002ms 2024-11-24T18:54:20,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:21,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:21,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:21,850 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-24T18:54:22,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:22,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:54:22,465 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474458426 after 4002ms 2024-11-24T18:54:22,465 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474458426 2024-11-24T18:54:22,471 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474458426 2024-11-24T18:54:22,472 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-24T18:54:22,472 ERROR [FSHLog-0-hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff-prefix:f2b92657890a,40185,1732474436122.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45717,DS-d0cc9942-eed6-414e-ae88-9ba9bf2fa8aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:54:22,472 WARN [FSHLog-0-hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff-prefix:f2b92657890a,40185,1732474436122.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45717,DS-d0cc9942-eed6-414e-ae88-9ba9bf2fa8aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T18:54:22,473 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f2b92657890a%2C40185%2C1732474436122.meta:.meta(num 1732474437285) roll requested 2024-11-24T18:54:22,473 INFO [regionserver/f2b92657890a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C40185%2C1732474436122.meta.1732474462473.meta 2024-11-24T18:54:22,480 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:22,481 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:22,481 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:22,481 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:22,481 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:22,481 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.meta.1732474437285.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.meta.1732474462473.meta 2024-11-24T18:54:22,483 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45717,DS-d0cc9942-eed6-414e-ae88-9ba9bf2fa8aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:54:22,483 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45717,DS-d0cc9942-eed6-414e-ae88-9ba9bf2fa8aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T18:54:22,483 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.meta.1732474437285.meta 2024-11-24T18:54:22,483 WARN [IPC Server handler 1 on default port 45793 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.meta.1732474437285.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1014 2024-11-24T18:54:22,484 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.meta.1732474437285.meta after 1ms 2024-11-24T18:54:22,484 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38605:38605),(127.0.0.1/127.0.0.1:39835:39835)] 2024-11-24T18:54:22,484 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.meta.1732474437285.meta is not closed yet, will try archiving it next time 2024-11-24T18:54:22,499 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/hbase/meta/1588230740/.tmp/info/2872e9ec8d4a4ce1a00ea7c943060448 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1732474437472.56770e6bab02c95da4526d53dd9ee5e7./info:regioninfo/1732474437850/Put/seqid=0 2024-11-24T18:54:22,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43525 is added to blk_1073741841_1025 (size=7125) 2024-11-24T18:54:22,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33433 is added to blk_1073741841_1025 (size=7125) 2024-11-24T18:54:22,504 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/hbase/meta/1588230740/.tmp/info/2872e9ec8d4a4ce1a00ea7c943060448 2024-11-24T18:54:22,522 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/hbase/meta/1588230740/.tmp/ns/9dfe9ecaffdc4fcdbc547dc99374d3ca is 43, key is default/ns:d/1732474437362/Put/seqid=0 2024-11-24T18:54:22,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33433 is added to blk_1073741842_1026 (size=5153) 2024-11-24T18:54:22,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43525 is added to blk_1073741842_1026 (size=5153) 2024-11-24T18:54:22,527 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/hbase/meta/1588230740/.tmp/ns/9dfe9ecaffdc4fcdbc547dc99374d3ca 2024-11-24T18:54:22,546 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/hbase/meta/1588230740/.tmp/table/2b9d67c58fbd4c738d54bf6babd085d2 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1732474437860/Put/seqid=0 2024-11-24T18:54:22,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33433 is added to blk_1073741843_1027 (size=5438) 2024-11-24T18:54:22,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43525 is added to blk_1073741843_1027 (size=5438) 2024-11-24T18:54:22,551 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/hbase/meta/1588230740/.tmp/table/2b9d67c58fbd4c738d54bf6babd085d2 2024-11-24T18:54:22,557 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/hbase/meta/1588230740/.tmp/info/2872e9ec8d4a4ce1a00ea7c943060448 as hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/hbase/meta/1588230740/info/2872e9ec8d4a4ce1a00ea7c943060448 2024-11-24T18:54:22,562 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/hbase/meta/1588230740/info/2872e9ec8d4a4ce1a00ea7c943060448, entries=10, sequenceid=11, filesize=7.0 K 2024-11-24T18:54:22,563 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/hbase/meta/1588230740/.tmp/ns/9dfe9ecaffdc4fcdbc547dc99374d3ca as hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/hbase/meta/1588230740/ns/9dfe9ecaffdc4fcdbc547dc99374d3ca 2024-11-24T18:54:22,569 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/hbase/meta/1588230740/ns/9dfe9ecaffdc4fcdbc547dc99374d3ca, entries=2, sequenceid=11, filesize=5.0 K 2024-11-24T18:54:22,570 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/hbase/meta/1588230740/.tmp/table/2b9d67c58fbd4c738d54bf6babd085d2 as hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/hbase/meta/1588230740/table/2b9d67c58fbd4c738d54bf6babd085d2 2024-11-24T18:54:22,576 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/hbase/meta/1588230740/table/2b9d67c58fbd4c738d54bf6babd085d2, entries=2, sequenceid=11, filesize=5.3 K 2024-11-24T18:54:22,578 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 107ms, sequenceid=11, compaction requested=false 2024-11-24T18:54:22,578 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-24T18:54:22,578 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 56770e6bab02c95da4526d53dd9ee5e7 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-24T18:54:22,578 ERROR [FSHLog-0-hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff-prefix:f2b92657890a,40185,1732474436122 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1026925338-172.17.0.2-1732474433586:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:54:22,579 WARN [FSHLog-0-hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff-prefix:f2b92657890a,40185,1732474436122 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1026925338-172.17.0.2-1732474433586:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
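The lease-recovery entries earlier in this stretch ("Recover lease on dfs file ...", "Lease recovery is in progress. RecoveryId = 1024", "Failed to recover lease, attempt=0 ... after 1ms") come from HBase asking the NameNode to close the abandoned WAL before archiving it. A rough sketch of that recover-and-poll loop against a plain HDFS client is shown below; it relies only on `DistributedFileSystem.recoverLease` and `isFileClosed` (both visible in the stack traces in this log), and the NameNode URI and WAL path are copied from the entries above purely for illustration. The class name `RecoverWalLeaseSketch` is hypothetical.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class RecoverWalLeaseSketch {

  /** Ask the NameNode to recover the lease, then poll until it reports the file closed. */
  static boolean recoverLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    // First call starts recovery; it returns true only if the file is already closed.
    boolean closed = dfs.recoverLease(wal);
    while (!closed && System.currentTimeMillis() < deadline) {
      Thread.sleep(1000L);
      // Poll until the NameNode reports the block recovery finished and the file closed.
      closed = dfs.isFileClosed(wal);
    }
    return closed;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // NameNode URI and WAL path copied from the log entries above, for illustration only.
    DistributedFileSystem dfs = (DistributedFileSystem)
        FileSystem.get(URI.create("hdfs://localhost:45793"), conf);
    Path wal = new Path("/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/"
        + "WALs/f2b92657890a,40185,1732474436122/"
        + "f2b92657890a%2C40185%2C1732474436122.meta.1732474437285.meta");
    System.out.println("closed=" + recoverLease(dfs, wal, 60_000L));
  }
}
```

HBase's own RecoverLeaseFSUtils does essentially this, with backoff and the "attempt=N" logging seen above.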
2024-11-24T18:54:22,579 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f2b92657890a%2C40185%2C1732474436122:(num 1732474458426) roll requested 2024-11-24T18:54:22,579 INFO [regionserver/f2b92657890a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C40185%2C1732474436122.1732474462579 2024-11-24T18:54:22,584 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474458426 newFile=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474462579 2024-11-24T18:54:22,584 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:22,584 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:22,584 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:22,584 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:22,584 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:22,584 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474458426 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474462579 2024-11-24T18:54:22,584 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1026925338-172.17.0.2-1732474433586:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:54:22,585 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1026925338-172.17.0.2-1732474433586:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:54:22,585 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474458426 2024-11-24T18:54:22,586 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474458426 after 1ms 2024-11-24T18:54:22,587 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.1732474458426 to hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/oldWALs/f2b92657890a%2C40185%2C1732474436122.1732474458426 2024-11-24T18:54:22,588 DEBUG [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39835:39835),(127.0.0.1/127.0.0.1:38605:38605)] 2024-11-24T18:54:22,601 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/default/TestLogRolling-testLogRollOnPipelineRestart/56770e6bab02c95da4526d53dd9ee5e7/.tmp/info/42e240353b294e6388ab0be0cb049d63 is 1080, key is row1002/info:/1732474447586/Put/seqid=0 2024-11-24T18:54:22,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33433 is added to blk_1073741845_1029 (size=9270) 2024-11-24T18:54:22,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43525 is added to blk_1073741845_1029 (size=9270) 2024-11-24T18:54:22,607 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/default/TestLogRolling-testLogRollOnPipelineRestart/56770e6bab02c95da4526d53dd9ee5e7/.tmp/info/42e240353b294e6388ab0be0cb049d63 2024-11-24T18:54:22,613 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/default/TestLogRolling-testLogRollOnPipelineRestart/56770e6bab02c95da4526d53dd9ee5e7/.tmp/info/42e240353b294e6388ab0be0cb049d63 as hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/default/TestLogRolling-testLogRollOnPipelineRestart/56770e6bab02c95da4526d53dd9ee5e7/info/42e240353b294e6388ab0be0cb049d63 2024-11-24T18:54:22,619 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/default/TestLogRolling-testLogRollOnPipelineRestart/56770e6bab02c95da4526d53dd9ee5e7/info/42e240353b294e6388ab0be0cb049d63, entries=4, sequenceid=8, filesize=9.1 K 2024-11-24T18:54:22,620 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 56770e6bab02c95da4526d53dd9ee5e7 in 42ms, sequenceid=8, compaction requested=false 2024-11-24T18:54:22,620 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 56770e6bab02c95da4526d53dd9ee5e7: 2024-11-24T18:54:22,626 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T18:54:22,626 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T18:54:22,626 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T18:54:22,626 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:54:22,626 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:54:22,626 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-24T18:54:22,627 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T18:54:22,627 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=303701444, stopped=false 2024-11-24T18:54:22,627 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=f2b92657890a,39263,1732474435950 2024-11-24T18:54:22,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40185-0x1016e31f5020001, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T18:54:22,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T18:54:22,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:54:22,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40185-0x1016e31f5020001, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:54:22,670 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T18:54:22,670 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
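The flushes logged just before the shutdown (DefaultStoreFlusher writing a temporary HFile under `.tmp`, HRegionFileSystem committing it into the store, HStore adding it at the current sequence id) are the server-side effect of a flush request. As a hedged illustration only, not part of the test above, triggering such a flush for the test table via the Admin API could look like the following; the class name `FlushTableExample` is made up, and the table name is taken from the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a memstore flush; the server-side result is the sequence logged
      // above: write a temporary HFile under .tmp, then commit it into the store.
      admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"));
    }
  }
}
```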
2024-11-24T18:54:22,670 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T18:54:22,670 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:54:22,670 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T18:54:22,670 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40185-0x1016e31f5020001, quorum=127.0.0.1:59413, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T18:54:22,671 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'f2b92657890a,40185,1732474436122' ***** 2024-11-24T18:54:22,671 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T18:54:22,671 INFO [RS:0;f2b92657890a:40185 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T18:54:22,671 INFO [RS:0;f2b92657890a:40185 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T18:54:22,671 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T18:54:22,671 INFO [RS:0;f2b92657890a:40185 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T18:54:22,671 INFO [RS:0;f2b92657890a:40185 {}] regionserver.HRegionServer(3091): Received CLOSE for 56770e6bab02c95da4526d53dd9ee5e7 2024-11-24T18:54:22,672 INFO [RS:0;f2b92657890a:40185 {}] regionserver.HRegionServer(959): stopping server f2b92657890a,40185,1732474436122 2024-11-24T18:54:22,672 INFO [RS:0;f2b92657890a:40185 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T18:54:22,672 INFO [RS:0;f2b92657890a:40185 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;f2b92657890a:40185. 2024-11-24T18:54:22,672 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 56770e6bab02c95da4526d53dd9ee5e7, disabling compactions & flushes 2024-11-24T18:54:22,672 DEBUG [RS:0;f2b92657890a:40185 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T18:54:22,672 DEBUG [RS:0;f2b92657890a:40185 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:54:22,672 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732474437472.56770e6bab02c95da4526d53dd9ee5e7. 2024-11-24T18:54:22,672 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732474437472.56770e6bab02c95da4526d53dd9ee5e7. 2024-11-24T18:54:22,672 INFO [RS:0;f2b92657890a:40185 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T18:54:22,672 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732474437472.56770e6bab02c95da4526d53dd9ee5e7. after waiting 0 ms 2024-11-24T18:54:22,672 INFO [RS:0;f2b92657890a:40185 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T18:54:22,672 INFO [RS:0;f2b92657890a:40185 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T18:54:22,672 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732474437472.56770e6bab02c95da4526d53dd9ee5e7. 2024-11-24T18:54:22,672 INFO [RS:0;f2b92657890a:40185 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T18:54:22,673 INFO [RS:0;f2b92657890a:40185 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-24T18:54:22,673 DEBUG [RS:0;f2b92657890a:40185 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 56770e6bab02c95da4526d53dd9ee5e7=TestLogRolling-testLogRollOnPipelineRestart,,1732474437472.56770e6bab02c95da4526d53dd9ee5e7.} 2024-11-24T18:54:22,673 DEBUG [RS:0;f2b92657890a:40185 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 56770e6bab02c95da4526d53dd9ee5e7 2024-11-24T18:54:22,673 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T18:54:22,673 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T18:54:22,673 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T18:54:22,673 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T18:54:22,673 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T18:54:22,678 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/default/TestLogRolling-testLogRollOnPipelineRestart/56770e6bab02c95da4526d53dd9ee5e7/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-24T18:54:22,679 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed 
TestLogRolling-testLogRollOnPipelineRestart,,1732474437472.56770e6bab02c95da4526d53dd9ee5e7. 2024-11-24T18:54:22,679 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-24T18:54:22,679 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 56770e6bab02c95da4526d53dd9ee5e7: Waiting for close lock at 1732474462672Running coprocessor pre-close hooks at 1732474462672Disabling compacts and flushes for region at 1732474462672Disabling writes for close at 1732474462672Writing region close event to WAL at 1732474462673 (+1 ms)Running coprocessor post-close hooks at 1732474462679 (+6 ms)Closed at 1732474462679 2024-11-24T18:54:22,679 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732474437472.56770e6bab02c95da4526d53dd9ee5e7. 2024-11-24T18:54:22,680 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T18:54:22,680 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T18:54:22,680 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732474462673Running coprocessor pre-close hooks at 1732474462673Disabling compacts and flushes for region at 1732474462673Disabling writes for close at 1732474462673Writing region close event to WAL at 1732474462675 (+2 ms)Running coprocessor post-close hooks at 1732474462680 (+5 ms)Closed at 1732474462680 2024-11-24T18:54:22,680 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T18:54:22,693 INFO [regionserver/f2b92657890a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-24T18:54:22,693 INFO [regionserver/f2b92657890a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-24T18:54:22,694 INFO [regionserver/f2b92657890a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T18:54:22,873 INFO [RS:0;f2b92657890a:40185 {}] regionserver.HRegionServer(976): stopping server f2b92657890a,40185,1732474436122; all regions closed. 
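The shutdown and region-close sequence above is driven by the test's teardown: the call stacks show AbstractTestLogRolling.tearDown invoking HBaseTestingUtil.shutdownMiniCluster, which closes client connections and stops the region server and master. The skeleton below is a sketch of that lifecycle, assuming the hbase-testing-util artifact and JUnit 4 are available; `MiniClusterLifecycleSketch` is a made-up class name and is not a reproduction of the actual test class.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Starts an in-process HDFS + ZooKeeper + HBase cluster for the test.
    util.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // Mirrors the tearDown in the call stacks above: produces the
    // "Shutting down minicluster" / region-close / "all regions closed" log lines.
    util.shutdownMiniCluster();
  }

  @Test
  public void clusterComesUp() throws Exception {
    // Nothing asserted here; the start/stop lifecycle itself is the point of the sketch.
  }
}
```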
2024-11-24T18:54:22,874 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:22,874 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:22,874 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:22,874 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:22,874 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:22,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33433 is added to blk_1073741840_1023 (size=825) 2024-11-24T18:54:22,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43525 is added to blk_1073741840_1023 (size=825) 2024-11-24T18:54:23,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:23,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:24,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:24,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:24,992 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-24T18:54:25,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:54:25,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:25,903 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T18:54:26,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:26,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:26,485 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.meta.1732474437285.meta after 4002ms 2024-11-24T18:54:26,486 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/WALs/f2b92657890a,40185,1732474436122/f2b92657890a%2C40185%2C1732474436122.meta.1732474437285.meta to hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/oldWALs/f2b92657890a%2C40185%2C1732474436122.meta.1732474437285.meta 2024-11-24T18:54:26,492 DEBUG [RS:0;f2b92657890a:40185 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/oldWALs 2024-11-24T18:54:26,492 INFO [RS:0;f2b92657890a:40185 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f2b92657890a%2C40185%2C1732474436122.meta:.meta(num 1732474462473) 2024-11-24T18:54:26,493 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:26,493 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:26,494 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:26,494 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:26,494 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:26,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33433 is added to blk_1073741844_1028 (size=1162) 2024-11-24T18:54:26,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43525 is added to blk_1073741844_1028 (size=1162) 2024-11-24T18:54:26,502 DEBUG [RS:0;f2b92657890a:40185 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/oldWALs 2024-11-24T18:54:26,502 INFO [RS:0;f2b92657890a:40185 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f2b92657890a%2C40185%2C1732474436122:(num 1732474462579) 2024-11-24T18:54:26,502 DEBUG [RS:0;f2b92657890a:40185 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:54:26,502 INFO [RS:0;f2b92657890a:40185 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T18:54:26,502 INFO [RS:0;f2b92657890a:40185 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T18:54:26,503 INFO [RS:0;f2b92657890a:40185 {}] hbase.ChoreService(370): Chore service for: regionserver/f2b92657890a:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T18:54:26,503 INFO [RS:0;f2b92657890a:40185 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T18:54:26,503 INFO 
[regionserver/f2b92657890a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T18:54:26,503 INFO [RS:0;f2b92657890a:40185 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40185 2024-11-24T18:54:26,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T18:54:26,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40185-0x1016e31f5020001, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/f2b92657890a,40185,1732474436122 2024-11-24T18:54:26,553 INFO [RS:0;f2b92657890a:40185 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T18:54:26,564 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [f2b92657890a,40185,1732474436122] 2024-11-24T18:54:26,574 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/f2b92657890a,40185,1732474436122 already deleted, retry=false 2024-11-24T18:54:26,574 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; f2b92657890a,40185,1732474436122 expired; onlineServers=0 2024-11-24T18:54:26,574 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'f2b92657890a,39263,1732474435950' ***** 2024-11-24T18:54:26,575 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T18:54:26,575 INFO [M:0;f2b92657890a:39263 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T18:54:26,575 INFO [M:0;f2b92657890a:39263 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T18:54:26,575 DEBUG [M:0;f2b92657890a:39263 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T18:54:26,575 DEBUG [M:0;f2b92657890a:39263 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T18:54:26,576 DEBUG [master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.large.0-1732474436610 {}] cleaner.HFileCleaner(306): Exit Thread[master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.large.0-1732474436610,5,FailOnTimeoutGroup] 2024-11-24T18:54:26,576 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T18:54:26,576 DEBUG [master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.small.0-1732474436610 {}] cleaner.HFileCleaner(306): Exit Thread[master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.small.0-1732474436610,5,FailOnTimeoutGroup] 2024-11-24T18:54:26,576 INFO [M:0;f2b92657890a:39263 {}] hbase.ChoreService(370): Chore service for: master/f2b92657890a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T18:54:26,576 INFO [M:0;f2b92657890a:39263 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T18:54:26,577 DEBUG [M:0;f2b92657890a:39263 {}] master.HMaster(1795): Stopping service threads 2024-11-24T18:54:26,577 INFO [M:0;f2b92657890a:39263 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T18:54:26,577 INFO [M:0;f2b92657890a:39263 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T18:54:26,577 INFO [M:0;f2b92657890a:39263 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T18:54:26,577 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T18:54:26,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T18:54:26,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:54:26,585 DEBUG [M:0;f2b92657890a:39263 {}] zookeeper.ZKUtil(347): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T18:54:26,585 WARN [M:0;f2b92657890a:39263 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T18:54:26,586 INFO [M:0;f2b92657890a:39263 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/.lastflushedseqids 2024-11-24T18:54:26,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33433 is added to blk_1073741846_1030 (size=120) 2024-11-24T18:54:26,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43525 is added to blk_1073741846_1030 (size=120) 2024-11-24T18:54:26,596 INFO [M:0;f2b92657890a:39263 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T18:54:26,597 INFO [M:0;f2b92657890a:39263 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T18:54:26,597 DEBUG [M:0;f2b92657890a:39263 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T18:54:26,597 INFO [M:0;f2b92657890a:39263 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T18:54:26,597 DEBUG [M:0;f2b92657890a:39263 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:54:26,597 DEBUG [M:0;f2b92657890a:39263 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T18:54:26,597 DEBUG [M:0;f2b92657890a:39263 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:54:26,597 INFO [M:0;f2b92657890a:39263 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-11-24T18:54:26,598 ERROR [FSHLog-0-hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData-prefix:f2b92657890a,39263,1732474435950 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45717,DS-d0cc9942-eed6-414e-ae88-9ba9bf2fa8aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:54:26,598 WARN [FSHLog-0-hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData-prefix:f2b92657890a,39263,1732474435950 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45717,DS-d0cc9942-eed6-414e-ae88-9ba9bf2fa8aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T18:54:26,598 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog f2b92657890a%2C39263%2C1732474435950:(num 1732474436257) roll requested 2024-11-24T18:54:26,598 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C39263%2C1732474435950.1732474466598 2024-11-24T18:54:26,604 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:26,604 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:26,604 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:26,604 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:26,604 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:26,605 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/WALs/f2b92657890a,39263,1732474435950/f2b92657890a%2C39263%2C1732474435950.1732474436257 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/WALs/f2b92657890a,39263,1732474435950/f2b92657890a%2C39263%2C1732474435950.1732474466598 2024-11-24T18:54:26,605 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45717,DS-d0cc9942-eed6-414e-ae88-9ba9bf2fa8aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T18:54:26,605 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45717,DS-d0cc9942-eed6-414e-ae88-9ba9bf2fa8aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T18:54:26,605 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/WALs/f2b92657890a,39263,1732474435950/f2b92657890a%2C39263%2C1732474435950.1732474436257 2024-11-24T18:54:26,605 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38605:38605),(127.0.0.1/127.0.0.1:39835:39835)] 2024-11-24T18:54:26,606 WARN [IPC Server handler 3 on default port 45793 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/WALs/f2b92657890a,39263,1732474435950/f2b92657890a%2C39263%2C1732474435950.1732474436257 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013 2024-11-24T18:54:26,606 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/WALs/f2b92657890a,39263,1732474435950/f2b92657890a%2C39263%2C1732474435950.1732474436257 is not closed yet, will try archiving it next time 2024-11-24T18:54:26,606 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/WALs/f2b92657890a,39263,1732474435950/f2b92657890a%2C39263%2C1732474435950.1732474436257 after 1ms 2024-11-24T18:54:26,625 DEBUG [M:0;f2b92657890a:39263 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/830fa239f2be45b1a4ff9d9dc8f8e8ca is 82, key is hbase:meta,,1/info:regioninfo/1732474437309/Put/seqid=0 2024-11-24T18:54:26,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43525 is added to blk_1073741848_1033 (size=5672) 2024-11-24T18:54:26,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33433 is added to blk_1073741848_1033 (size=5672) 2024-11-24T18:54:26,630 INFO [M:0;f2b92657890a:39263 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/830fa239f2be45b1a4ff9d9dc8f8e8ca 2024-11-24T18:54:26,650 DEBUG [M:0;f2b92657890a:39263 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c43a68ed6f1142b3859d7b31348b8f3c is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732474437865/Put/seqid=0 2024-11-24T18:54:26,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33433 is added to blk_1073741849_1034 (size=6119) 2024-11-24T18:54:26,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43525 is added to blk_1073741849_1034 (size=6119) 2024-11-24T18:54:26,655 INFO [M:0;f2b92657890a:39263 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c43a68ed6f1142b3859d7b31348b8f3c 2024-11-24T18:54:26,664 INFO [RS:0;f2b92657890a:40185 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T18:54:26,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40185-0x1016e31f5020001, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T18:54:26,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40185-0x1016e31f5020001, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T18:54:26,664 INFO [RS:0;f2b92657890a:40185 {}] regionserver.HRegionServer(1031): Exiting; stopping=f2b92657890a,40185,1732474436122; zookeeper connection closed. 2024-11-24T18:54:26,664 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1573d2d7 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1573d2d7 2024-11-24T18:54:26,664 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T18:54:26,680 DEBUG [M:0;f2b92657890a:39263 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/93c8ab619a5e4754accebb7b0c76d5a4 is 69, key is f2b92657890a,40185,1732474436122/rs:state/1732474436676/Put/seqid=0 2024-11-24T18:54:26,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33433 is added to blk_1073741850_1035 (size=5156) 2024-11-24T18:54:26,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43525 is added to blk_1073741850_1035 (size=5156) 2024-11-24T18:54:26,685 INFO [M:0;f2b92657890a:39263 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/93c8ab619a5e4754accebb7b0c76d5a4 2024-11-24T18:54:26,704 DEBUG [M:0;f2b92657890a:39263 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c7f630a1580e4960b116ec4704370fdd is 52, key is load_balancer_on/state:d/1732474437467/Put/seqid=0 2024-11-24T18:54:26,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33433 is added to blk_1073741851_1036 (size=5056) 2024-11-24T18:54:26,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43525 is added to blk_1073741851_1036 (size=5056) 2024-11-24T18:54:26,709 INFO [M:0;f2b92657890a:39263 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c7f630a1580e4960b116ec4704370fdd 2024-11-24T18:54:26,714 DEBUG [M:0;f2b92657890a:39263 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/830fa239f2be45b1a4ff9d9dc8f8e8ca as hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/830fa239f2be45b1a4ff9d9dc8f8e8ca 2024-11-24T18:54:26,719 INFO [M:0;f2b92657890a:39263 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/830fa239f2be45b1a4ff9d9dc8f8e8ca, entries=8, sequenceid=56, filesize=5.5 K 2024-11-24T18:54:26,720 DEBUG [M:0;f2b92657890a:39263 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c43a68ed6f1142b3859d7b31348b8f3c as hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c43a68ed6f1142b3859d7b31348b8f3c 2024-11-24T18:54:26,726 INFO [M:0;f2b92657890a:39263 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c43a68ed6f1142b3859d7b31348b8f3c, entries=6, sequenceid=56, filesize=6.0 K 2024-11-24T18:54:26,727 DEBUG [M:0;f2b92657890a:39263 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/93c8ab619a5e4754accebb7b0c76d5a4 as hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/93c8ab619a5e4754accebb7b0c76d5a4 2024-11-24T18:54:26,731 INFO [M:0;f2b92657890a:39263 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/93c8ab619a5e4754accebb7b0c76d5a4, entries=1, sequenceid=56, filesize=5.0 K 2024-11-24T18:54:26,733 DEBUG [M:0;f2b92657890a:39263 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c7f630a1580e4960b116ec4704370fdd as hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c7f630a1580e4960b116ec4704370fdd 2024-11-24T18:54:26,738 INFO [M:0;f2b92657890a:39263 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c7f630a1580e4960b116ec4704370fdd, entries=1, sequenceid=56, filesize=4.9 K 2024-11-24T18:54:26,739 INFO [M:0;f2b92657890a:39263 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 142ms, sequenceid=56, compaction requested=false 2024-11-24T18:54:26,740 INFO [M:0;f2b92657890a:39263 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T18:54:26,740 DEBUG [M:0;f2b92657890a:39263 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732474466597Disabling compacts and flushes for region at 1732474466597Disabling writes for close at 1732474466597Obtaining lock to block concurrent updates at 1732474466597Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732474466597Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1732474466598 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732474466606 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732474466606Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732474466624 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732474466624Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732474466636 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732474466649 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732474466649Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732474466660 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732474466679 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732474466679Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732474466690 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732474466703 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732474466703Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@40c692e7: reopening flushed file at 1732474466713 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a195b5e: reopening flushed file at 1732474466719 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b8da1e8: reopening flushed file at 1732474466726 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a9909aa: reopening flushed file at 1732474466732 (+6 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 142ms, sequenceid=56, compaction requested=false at 1732474466739 (+7 ms)Writing region close event to WAL at 1732474466740 (+1 ms)Closed at 1732474466740 2024-11-24T18:54:26,741 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:26,741 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:26,741 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:26,741 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:26,741 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:54:26,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43525 is added to blk_1073741847_1031 (size=757) 2024-11-24T18:54:26,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33433 is added to blk_1073741847_1031 (size=757) 2024-11-24T18:54:27,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:27,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:27,682 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:27,683 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:27,705 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:27,705 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:27,705 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:27,705 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:27,705 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:27,706 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:27,709 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:27,709 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:27,709 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:27,711 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:27,714 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:27,714 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:27,992 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-24T18:54:28,217 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T18:54:28,221 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:28,222 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:28,222 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:28,223 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:28,242 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:28,242 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:28,242 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:28,243 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:28,243 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:28,243 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:28,246 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:28,246 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:28,247 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:28,249 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:28,327 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T18:54:28,328 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T18:54:28,328 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-24T18:54:28,328 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-24T18:54:28,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:28,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:54:29,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:29,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:30,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:54:30,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:30,607 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/WALs/f2b92657890a,39263,1732474435950/f2b92657890a%2C39263%2C1732474435950.1732474436257 after 4002ms 2024-11-24T18:54:30,608 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/WALs/f2b92657890a,39263,1732474435950/f2b92657890a%2C39263%2C1732474435950.1732474436257 to hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/oldWALs/f2b92657890a%2C39263%2C1732474435950.1732474436257 2024-11-24T18:54:30,612 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/MasterData/oldWALs/f2b92657890a%2C39263%2C1732474435950.1732474436257 to hdfs://localhost:45793/user/jenkins/test-data/c800237f-d88d-dd75-c687-231156bbbfff/oldWALs/f2b92657890a%2C39263%2C1732474435950.1732474436257$masterlocalwal$ 2024-11-24T18:54:30,612 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
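The repeated "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings above come from WAL lease recovery polling a DFS client that has already been shut down, roughly once per second per WAL file; the master-store WAL on localhost:45793, whose client is still open, recovers on attempt=1 after about 4 seconds and is then archived. The sketch below illustrates that recover-then-poll loop only; it is not HBase's RecoverLeaseFSUtils (which invokes isFileClosed reflectively, hence the GeneratedMethodAccessor frames), and the method name, poll interval, and timeout are assumptions.

```java
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Illustrative lease-recovery loop for a WAL file. HBase's real utility goes
// through reflection for client compatibility; this sketch calls the HDFS
// client API directly. recoverLeaseSketch and its parameters are assumptions.
public final class LeaseRecoverySketch {
  public static boolean recoverLeaseSketch(FileSystem fs, Path wal, long timeoutMs)
      throws IOException, InterruptedException {
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // only HDFS has leases to recover
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    long deadline = System.currentTimeMillis() + timeoutMs;
    boolean recovered = dfs.recoverLease(wal); // ask the NameNode to start recovery
    while (!recovered && System.currentTimeMillis() < deadline) {
      Thread.sleep(1000L); // the warnings above repeat at roughly 1s intervals
      try {
        recovered = dfs.isFileClosed(wal);
      } catch (IOException e) {
        // "Filesystem closed": the DFSClient behind fs was already shut down,
        // so each poll fails and would be logged as a WARN, as seen above.
      }
    }
    return recovered;
  }
}
```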
2024-11-24T18:54:30,612 INFO [M:0;f2b92657890a:39263 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-24T18:54:30,612 INFO [M:0;f2b92657890a:39263 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39263 2024-11-24T18:54:30,613 INFO [M:0;f2b92657890a:39263 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T18:54:30,864 INFO [M:0;f2b92657890a:39263 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T18:54:30,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T18:54:30,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39263-0x1016e31f5020000, quorum=127.0.0.1:59413, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T18:54:30,867 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5b16a6e2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:54:30,867 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@27b0e5fe{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T18:54:30,867 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T18:54:30,867 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c39138a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T18:54:30,868 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3df3f65e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/hadoop.log.dir/,STOPPED} 2024-11-24T18:54:30,869 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T18:54:30,869 WARN [BP-1026925338-172.17.0.2-1732474433586 heartbeating to localhost/127.0.0.1:45793 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T18:54:30,869 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T18:54:30,869 WARN [BP-1026925338-172.17.0.2-1732474433586 heartbeating to localhost/127.0.0.1:45793 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1026925338-172.17.0.2-1732474433586 (Datanode Uuid e83292c8-aad5-41af-a651-d4bb377cfa6a) service to localhost/127.0.0.1:45793 2024-11-24T18:54:30,870 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/cluster_c7dc029b-5c53-012f-f1b6-357157c49622/data/data3/current/BP-1026925338-172.17.0.2-1732474433586 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:54:30,870 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/cluster_c7dc029b-5c53-012f-f1b6-357157c49622/data/data4/current/BP-1026925338-172.17.0.2-1732474433586 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:54:30,870 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T18:54:30,873 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4778d192{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:54:30,874 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@76a8c1ee{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T18:54:30,874 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T18:54:30,874 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19016e01{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T18:54:30,874 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22abc971{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/hadoop.log.dir/,STOPPED} 2024-11-24T18:54:30,875 WARN [BP-1026925338-172.17.0.2-1732474433586 heartbeating to localhost/127.0.0.1:45793 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T18:54:30,875 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T18:54:30,875 WARN [BP-1026925338-172.17.0.2-1732474433586 heartbeating to localhost/127.0.0.1:45793 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1026925338-172.17.0.2-1732474433586 (Datanode Uuid 54fb0458-8a6c-4eb6-ba27-59e0846caccf) service to localhost/127.0.0.1:45793 2024-11-24T18:54:30,875 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T18:54:30,876 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/cluster_c7dc029b-5c53-012f-f1b6-357157c49622/data/data1/current/BP-1026925338-172.17.0.2-1732474433586 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:54:30,876 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/cluster_c7dc029b-5c53-012f-f1b6-357157c49622/data/data2/current/BP-1026925338-172.17.0.2-1732474433586 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:54:30,877 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T18:54:30,883 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@249ce69c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T18:54:30,884 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7c2762d6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T18:54:30,884 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T18:54:30,884 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25a29a07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T18:54:30,884 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2e4cbcc2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/hadoop.log.dir/,STOPPED} 2024-11-24T18:54:30,891 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T18:54:30,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T18:54:30,919 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=182 (was 157) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45793 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45793 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45793 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45793 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45793 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45793 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45793 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:45793 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 448) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=201 (was 272), ProcessCount=11 (was 11), AvailableMemoryMB=8811 (was 8607) - AvailableMemoryMB LEAK? 
- 2024-11-24T18:54:30,926 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=182, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=201, ProcessCount=11, AvailableMemoryMB=8811 2024-11-24T18:54:30,926 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T18:54:30,926 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/hadoop.log.dir so I do NOT create it in target/test-data/158c5696-4029-845c-c181-2e809f9880d1 2024-11-24T18:54:30,926 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c9f31953-3e74-7f96-380c-dd31e3afaa08/hadoop.tmp.dir so I do NOT create it in target/test-data/158c5696-4029-845c-c181-2e809f9880d1 2024-11-24T18:54:30,926 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/cluster_fc72df8f-492b-1532-6ece-79f1d51753f3, deleteOnExit=true 2024-11-24T18:54:30,926 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T18:54:30,926 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/test.cache.data in system properties and HBase conf 2024-11-24T18:54:30,926 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T18:54:30,926 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/hadoop.log.dir in system properties and HBase conf 2024-11-24T18:54:30,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T18:54:30,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T18:54:30,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T18:54:30,927 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-24T18:54:30,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T18:54:30,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T18:54:30,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T18:54:30,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T18:54:30,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T18:54:30,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T18:54:30,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T18:54:30,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T18:54:30,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T18:54:30,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/nfs.dump.dir in system properties and HBase conf 2024-11-24T18:54:30,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/java.io.tmpdir in system properties and HBase conf 2024-11-24T18:54:30,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T18:54:30,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T18:54:30,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T18:54:30,941 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T18:54:31,409 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:54:31,413 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T18:54:31,414 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T18:54:31,414 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T18:54:31,414 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T18:54:31,415 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:54:31,415 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8d4c846{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/hadoop.log.dir/,AVAILABLE} 2024-11-24T18:54:31,415 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@20e4ef1d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T18:54:31,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:31,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:31,511 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5d8d7f9b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/java.io.tmpdir/jetty-localhost-44519-hadoop-hdfs-3_4_1-tests_jar-_-any-6163502563398892700/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T18:54:31,512 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2c020752{HTTP/1.1, (http/1.1)}{localhost:44519} 2024-11-24T18:54:31,512 INFO [Time-limited test {}] server.Server(415): Started @200864ms 2024-11-24T18:54:31,523 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T18:54:31,806 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:54:31,809 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T18:54:31,810 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T18:54:31,810 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T18:54:31,810 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T18:54:31,811 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a2ef153{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/hadoop.log.dir/,AVAILABLE} 2024-11-24T18:54:31,811 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7b079ea2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T18:54:31,903 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@e6bebf5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/java.io.tmpdir/jetty-localhost-38613-hadoop-hdfs-3_4_1-tests_jar-_-any-16780292229585658572/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:54:31,904 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@261a9e0a{HTTP/1.1, (http/1.1)}{localhost:38613} 2024-11-24T18:54:31,904 INFO [Time-limited test {}] server.Server(415): Started @201256ms 2024-11-24T18:54:31,905 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T18:54:31,928 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:54:31,930 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T18:54:31,931 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T18:54:31,931 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T18:54:31,931 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T18:54:31,931 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e2a30ba{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/hadoop.log.dir/,AVAILABLE} 2024-11-24T18:54:31,931 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b7fc8f3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T18:54:32,025 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@65345c29{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/java.io.tmpdir/jetty-localhost-43261-hadoop-hdfs-3_4_1-tests_jar-_-any-2496505693908582216/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:54:32,025 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@36f52998{HTTP/1.1, (http/1.1)}{localhost:43261} 2024-11-24T18:54:32,025 INFO [Time-limited test {}] server.Server(415): Started @201378ms 2024-11-24T18:54:32,026 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T18:54:32,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:32,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:54:33,053 WARN [Thread-1649 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/cluster_fc72df8f-492b-1532-6ece-79f1d51753f3/data/data1/current/BP-1536927535-172.17.0.2-1732474470951/current, will proceed with Du for space computation calculation, 2024-11-24T18:54:33,053 WARN [Thread-1650 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/cluster_fc72df8f-492b-1532-6ece-79f1d51753f3/data/data2/current/BP-1536927535-172.17.0.2-1732474470951/current, will proceed with Du for space computation calculation, 2024-11-24T18:54:33,077 WARN [Thread-1613 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T18:54:33,080 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf3ccd064611324bf with lease ID 0xb079c3386082a305: Processing first storage report for DS-71bf0351-531a-4e1d-8e6c-edef7b577a2a from datanode DatanodeRegistration(127.0.0.1:41449, datanodeUuid=93b93bea-d2c1-469c-9f3c-763bcade36b9, infoPort=39379, infoSecurePort=0, ipcPort=43509, storageInfo=lv=-57;cid=testClusterID;nsid=367522599;c=1732474470951) 2024-11-24T18:54:33,080 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf3ccd064611324bf with lease ID 0xb079c3386082a305: from storage DS-71bf0351-531a-4e1d-8e6c-edef7b577a2a node DatanodeRegistration(127.0.0.1:41449, datanodeUuid=93b93bea-d2c1-469c-9f3c-763bcade36b9, infoPort=39379, infoSecurePort=0, ipcPort=43509, storageInfo=lv=-57;cid=testClusterID;nsid=367522599;c=1732474470951), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:54:33,080 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf3ccd064611324bf with lease ID 0xb079c3386082a305: Processing first storage report for DS-3718e4c6-0cc7-48ba-83c8-da3a5259cc06 from datanode DatanodeRegistration(127.0.0.1:41449, datanodeUuid=93b93bea-d2c1-469c-9f3c-763bcade36b9, infoPort=39379, infoSecurePort=0, ipcPort=43509, storageInfo=lv=-57;cid=testClusterID;nsid=367522599;c=1732474470951) 2024-11-24T18:54:33,080 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf3ccd064611324bf with lease ID 0xb079c3386082a305: from storage DS-3718e4c6-0cc7-48ba-83c8-da3a5259cc06 node DatanodeRegistration(127.0.0.1:41449, datanodeUuid=93b93bea-d2c1-469c-9f3c-763bcade36b9, infoPort=39379, infoSecurePort=0, ipcPort=43509, storageInfo=lv=-57;cid=testClusterID;nsid=367522599;c=1732474470951), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:54:33,189 WARN [Thread-1661 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/cluster_fc72df8f-492b-1532-6ece-79f1d51753f3/data/data4/current/BP-1536927535-172.17.0.2-1732474470951/current, will proceed with Du for space computation calculation, 2024-11-24T18:54:33,189 WARN [Thread-1660 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/cluster_fc72df8f-492b-1532-6ece-79f1d51753f3/data/data3/current/BP-1536927535-172.17.0.2-1732474470951/current, will proceed with Du for space computation calculation, 2024-11-24T18:54:33,207 WARN [Thread-1636 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T18:54:33,209 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf3a3fe8b0668820f with lease ID 0xb079c3386082a306: Processing first storage report for DS-94f4f083-4ce6-4c21-b8db-d9fac778efc6 from datanode DatanodeRegistration(127.0.0.1:44711, datanodeUuid=473c356e-3ce1-4bb3-9474-a0d8871fcc79, infoPort=42865, infoSecurePort=0, ipcPort=45695, storageInfo=lv=-57;cid=testClusterID;nsid=367522599;c=1732474470951) 2024-11-24T18:54:33,209 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf3a3fe8b0668820f with lease ID 0xb079c3386082a306: from storage DS-94f4f083-4ce6-4c21-b8db-d9fac778efc6 node DatanodeRegistration(127.0.0.1:44711, datanodeUuid=473c356e-3ce1-4bb3-9474-a0d8871fcc79, infoPort=42865, infoSecurePort=0, ipcPort=45695, storageInfo=lv=-57;cid=testClusterID;nsid=367522599;c=1732474470951), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:54:33,209 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf3a3fe8b0668820f with lease ID 0xb079c3386082a306: Processing first storage report for DS-d801480e-6765-4915-bec4-6f6dd792aef8 from datanode DatanodeRegistration(127.0.0.1:44711, datanodeUuid=473c356e-3ce1-4bb3-9474-a0d8871fcc79, infoPort=42865, infoSecurePort=0, ipcPort=45695, storageInfo=lv=-57;cid=testClusterID;nsid=367522599;c=1732474470951) 2024-11-24T18:54:33,209 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf3a3fe8b0668820f with lease ID 0xb079c3386082a306: from storage DS-d801480e-6765-4915-bec4-6f6dd792aef8 node DatanodeRegistration(127.0.0.1:44711, datanodeUuid=473c356e-3ce1-4bb3-9474-a0d8871fcc79, infoPort=42865, infoSecurePort=0, ipcPort=45695, storageInfo=lv=-57;cid=testClusterID;nsid=367522599;c=1732474470951), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:54:33,263 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1 2024-11-24T18:54:33,290 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/cluster_fc72df8f-492b-1532-6ece-79f1d51753f3/zookeeper_0, clientPort=55864, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/cluster_fc72df8f-492b-1532-6ece-79f1d51753f3/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/cluster_fc72df8f-492b-1532-6ece-79f1d51753f3/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T18:54:33,291 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55864 2024-11-24T18:54:33,292 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:54:33,294 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:54:33,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741825_1001 (size=7) 2024-11-24T18:54:33,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41449 is added to blk_1073741825_1001 (size=7) 2024-11-24T18:54:33,305 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84 with version=8 2024-11-24T18:54:33,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/hbase-staging 2024-11-24T18:54:33,308 INFO [Time-limited test {}] client.ConnectionUtils(128): master/f2b92657890a:0 server-side Connection retries=45 2024-11-24T18:54:33,308 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:54:33,308 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T18:54:33,308 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T18:54:33,308 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:54:33,308 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T18:54:33,308 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T18:54:33,308 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T18:54:33,309 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46733 2024-11-24T18:54:33,311 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46733 connecting to ZooKeeper ensemble=127.0.0.1:55864 2024-11-24T18:54:33,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:467330x0, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-24T18:54:33,370 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46733-0x1016e3286f40000 connected 2024-11-24T18:54:33,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:54:33,447 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:54:33,449 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:54:33,450 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T18:54:33,451 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84, hbase.cluster.distributed=false 2024-11-24T18:54:33,452 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T18:54:33,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:54:33,453 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46733 2024-11-24T18:54:33,453 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46733 2024-11-24T18:54:33,453 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46733 2024-11-24T18:54:33,454 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46733 2024-11-24T18:54:33,454 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46733 2024-11-24T18:54:33,474 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/f2b92657890a:0 server-side Connection retries=45 2024-11-24T18:54:33,474 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:54:33,474 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T18:54:33,474 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T18:54:33,474 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:54:33,474 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T18:54:33,474 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T18:54:33,475 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T18:54:33,475 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42587 2024-11-24T18:54:33,477 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42587 connecting to ZooKeeper ensemble=127.0.0.1:55864 2024-11-24T18:54:33,477 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:54:33,478 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:54:33,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:425870x0, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T18:54:33,490 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42587-0x1016e3286f40001, 
quorum=127.0.0.1:55864, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T18:54:33,490 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42587-0x1016e3286f40001 connected 2024-11-24T18:54:33,490 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T18:54:33,491 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T18:54:33,491 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42587-0x1016e3286f40001, quorum=127.0.0.1:55864, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T18:54:33,492 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42587-0x1016e3286f40001, quorum=127.0.0.1:55864, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T18:54:33,492 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42587 2024-11-24T18:54:33,492 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42587 2024-11-24T18:54:33,492 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42587 2024-11-24T18:54:33,493 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42587 2024-11-24T18:54:33,493 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42587 2024-11-24T18:54:33,506 DEBUG [M:0;f2b92657890a:46733 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;f2b92657890a:46733 2024-11-24T18:54:33,506 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/f2b92657890a,46733,1732474473307 2024-11-24T18:54:33,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42587-0x1016e3286f40001, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T18:54:33,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T18:54:33,511 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/f2b92657890a,46733,1732474473307 2024-11-24T18:54:33,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:54:33,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42587-0x1016e3286f40001, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T18:54:33,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42587-0x1016e3286f40001, 
quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:54:33,521 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T18:54:33,522 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/f2b92657890a,46733,1732474473307 from backup master directory 2024-11-24T18:54:33,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/f2b92657890a,46733,1732474473307 2024-11-24T18:54:33,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42587-0x1016e3286f40001, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T18:54:33,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T18:54:33,532 WARN [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T18:54:33,532 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=f2b92657890a,46733,1732474473307 2024-11-24T18:54:33,537 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/hbase.id] with ID: 7ceaebf5-ff5e-4036-b2e4-d120e47d2f34 2024-11-24T18:54:33,537 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/.tmp/hbase.id 2024-11-24T18:54:33,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41449 is added to blk_1073741826_1002 (size=42) 2024-11-24T18:54:33,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741826_1002 (size=42) 2024-11-24T18:54:33,546 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/.tmp/hbase.id]:[hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/hbase.id] 2024-11-24T18:54:33,564 INFO [master/f2b92657890a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:54:33,564 INFO [master/f2b92657890a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T18:54:33,567 INFO [master/f2b92657890a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 3ms. 
2024-11-24T18:54:33,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:54:33,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42587-0x1016e3286f40001, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:54:33,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41449 is added to blk_1073741827_1003 (size=196) 2024-11-24T18:54:33,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741827_1003 (size=196) 2024-11-24T18:54:33,585 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T18:54:33,586 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T18:54:33,586 INFO [master/f2b92657890a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T18:54:33,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41449 is added to blk_1073741828_1004 (size=1189) 2024-11-24T18:54:33,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741828_1004 (size=1189) 2024-11-24T18:54:33,596 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/MasterData/data/master/store 2024-11-24T18:54:33,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41449 is added to blk_1073741829_1005 (size=34) 2024-11-24T18:54:33,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741829_1005 (size=34) 2024-11-24T18:54:33,831 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T18:54:33,832 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:33,832 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:33,832 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:33,833 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:33,833 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:33,834 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:33,862 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:33,863 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:33,863 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:33,863 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:33,864 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:33,864 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:33,870 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:33,870 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:33,870 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:33,874 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:34,005 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:54:34,005 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T18:54:34,005 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:54:34,005 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:54:34,005 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T18:54:34,005 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:54:34,005 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T18:54:34,005 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732474474005Disabling compacts and flushes for region at 1732474474005Disabling writes for close at 1732474474005Writing region close event to WAL at 1732474474005Closed at 1732474474005 2024-11-24T18:54:34,006 WARN [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/MasterData/data/master/store/.initializing 2024-11-24T18:54:34,007 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/MasterData/WALs/f2b92657890a,46733,1732474473307 2024-11-24T18:54:34,009 INFO [master/f2b92657890a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f2b92657890a%2C46733%2C1732474473307, suffix=, logDir=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/MasterData/WALs/f2b92657890a,46733,1732474473307, archiveDir=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/MasterData/oldWALs, maxLogs=10 2024-11-24T18:54:34,010 INFO [master/f2b92657890a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C46733%2C1732474473307.1732474474010 2024-11-24T18:54:34,017 INFO [master/f2b92657890a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/MasterData/WALs/f2b92657890a,46733,1732474473307/f2b92657890a%2C46733%2C1732474473307.1732474474010 2024-11-24T18:54:34,024 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42865:42865),(127.0.0.1/127.0.0.1:39379:39379)] 2024-11-24T18:54:34,026 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T18:54:34,026 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:54:34,027 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:54:34,027 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:54:34,031 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:54:34,033 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T18:54:34,033 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:54:34,033 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:54:34,033 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:54:34,035 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T18:54:34,035 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:54:34,035 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T18:54:34,035 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:54:34,037 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T18:54:34,037 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:54:34,037 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T18:54:34,037 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:54:34,039 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T18:54:34,039 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:54:34,043 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T18:54:34,044 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:54:34,045 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:54:34,045 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:54:34,047 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:54:34,047 DEBUG [master/f2b92657890a:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:54:34,048 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T18:54:34,049 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:54:34,052 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T18:54:34,053 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=790112, jitterRate=0.004680454730987549}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T18:54:34,053 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732474474027Initializing all the Stores at 1732474474028 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474474028Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474474031 (+3 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474474031Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474474031Cleaning up temporary data from old regions at 1732474474047 (+16 ms)Region opened successfully at 1732474474053 (+6 ms) 2024-11-24T18:54:34,054 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T18:54:34,058 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@cba92b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f2b92657890a/172.17.0.2:0 2024-11-24T18:54:34,059 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T18:54:34,059 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T18:54:34,059 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T18:54:34,059 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T18:54:34,060 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T18:54:34,061 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T18:54:34,061 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T18:54:34,067 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T18:54:34,068 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T18:54:34,079 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T18:54:34,079 INFO [master/f2b92657890a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T18:54:34,082 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T18:54:34,089 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T18:54:34,090 INFO [master/f2b92657890a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T18:54:34,091 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T18:54:34,100 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T18:54:34,101 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T18:54:34,110 DEBUG 
[master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T18:54:34,113 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T18:54:34,121 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T18:54:34,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42587-0x1016e3286f40001, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T18:54:34,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T18:54:34,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:54:34,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42587-0x1016e3286f40001, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:54:34,132 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=f2b92657890a,46733,1732474473307, sessionid=0x1016e3286f40000, setting cluster-up flag (Was=false) 2024-11-24T18:54:34,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:54:34,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42587-0x1016e3286f40001, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:54:34,184 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T18:54:34,185 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f2b92657890a,46733,1732474473307 2024-11-24T18:54:34,205 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:54:34,205 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42587-0x1016e3286f40001, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:54:34,237 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T18:54:34,238 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f2b92657890a,46733,1732474473307 2024-11-24T18:54:34,239 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T18:54:34,241 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T18:54:34,241 INFO [master/f2b92657890a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T18:54:34,241 INFO [master/f2b92657890a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T18:54:34,241 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: f2b92657890a,46733,1732474473307 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T18:54:34,242 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/f2b92657890a:0, corePoolSize=5, maxPoolSize=5 2024-11-24T18:54:34,242 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/f2b92657890a:0, corePoolSize=5, maxPoolSize=5 2024-11-24T18:54:34,243 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/f2b92657890a:0, corePoolSize=5, maxPoolSize=5 2024-11-24T18:54:34,243 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/f2b92657890a:0, corePoolSize=5, maxPoolSize=5 2024-11-24T18:54:34,243 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/f2b92657890a:0, corePoolSize=10, maxPoolSize=10 2024-11-24T18:54:34,243 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:54:34,243 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/f2b92657890a:0, corePoolSize=2, maxPoolSize=2 2024-11-24T18:54:34,243 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/f2b92657890a:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T18:54:34,244 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732474504244 2024-11-24T18:54:34,244 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T18:54:34,244 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T18:54:34,244 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T18:54:34,244 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T18:54:34,244 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T18:54:34,244 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T18:54:34,244 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T18:54:34,244 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T18:54:34,244 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T18:54:34,244 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T18:54:34,245 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T18:54:34,245 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T18:54:34,245 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T18:54:34,245 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T18:54:34,245 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.large.0-1732474474245,5,FailOnTimeoutGroup] 2024-11-24T18:54:34,245 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.small.0-1732474474245,5,FailOnTimeoutGroup] 2024-11-24T18:54:34,245 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T18:54:34,245 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T18:54:34,245 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T18:54:34,245 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-24T18:54:34,246 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:54:34,246 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T18:54:34,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41449 is added to blk_1073741831_1007 (size=1321) 2024-11-24T18:54:34,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741831_1007 (size=1321) 2024-11-24T18:54:34,254 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T18:54:34,254 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84 2024-11-24T18:54:34,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41449 is added to blk_1073741832_1008 (size=32) 2024-11-24T18:54:34,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741832_1008 (size=32) 2024-11-24T18:54:34,295 INFO [RS:0;f2b92657890a:42587 {}] regionserver.HRegionServer(746): ClusterId : 7ceaebf5-ff5e-4036-b2e4-d120e47d2f34 2024-11-24T18:54:34,296 DEBUG [RS:0;f2b92657890a:42587 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T18:54:34,301 DEBUG [RS:0;f2b92657890a:42587 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T18:54:34,301 DEBUG [RS:0;f2b92657890a:42587 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T18:54:34,311 DEBUG [RS:0;f2b92657890a:42587 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T18:54:34,311 DEBUG [RS:0;f2b92657890a:42587 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10883625, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f2b92657890a/172.17.0.2:0 2024-11-24T18:54:34,322 DEBUG [RS:0;f2b92657890a:42587 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;f2b92657890a:42587 2024-11-24T18:54:34,322 INFO [RS:0;f2b92657890a:42587 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T18:54:34,322 INFO [RS:0;f2b92657890a:42587 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T18:54:34,322 DEBUG [RS:0;f2b92657890a:42587 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-24T18:54:34,322 INFO [RS:0;f2b92657890a:42587 {}] regionserver.HRegionServer(2659): reportForDuty to master=f2b92657890a,46733,1732474473307 with port=42587, startcode=1732474473474 2024-11-24T18:54:34,323 DEBUG [RS:0;f2b92657890a:42587 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T18:54:34,324 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56623, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T18:54:34,325 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46733 {}] master.ServerManager(363): Checking decommissioned status of RegionServer f2b92657890a,42587,1732474473474 2024-11-24T18:54:34,325 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46733 {}] master.ServerManager(517): Registering regionserver=f2b92657890a,42587,1732474473474 2024-11-24T18:54:34,327 DEBUG [RS:0;f2b92657890a:42587 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84 2024-11-24T18:54:34,327 DEBUG [RS:0;f2b92657890a:42587 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42029 2024-11-24T18:54:34,327 DEBUG [RS:0;f2b92657890a:42587 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T18:54:34,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T18:54:34,332 DEBUG [RS:0;f2b92657890a:42587 {}] zookeeper.ZKUtil(111): regionserver:42587-0x1016e3286f40001, quorum=127.0.0.1:55864, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/f2b92657890a,42587,1732474473474 2024-11-24T18:54:34,332 WARN [RS:0;f2b92657890a:42587 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T18:54:34,332 INFO [RS:0;f2b92657890a:42587 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T18:54:34,332 DEBUG [RS:0;f2b92657890a:42587 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/WALs/f2b92657890a,42587,1732474473474 2024-11-24T18:54:34,332 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [f2b92657890a,42587,1732474473474] 2024-11-24T18:54:34,336 INFO [RS:0;f2b92657890a:42587 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T18:54:34,337 INFO [RS:0;f2b92657890a:42587 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T18:54:34,338 INFO [RS:0;f2b92657890a:42587 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T18:54:34,338 INFO [RS:0;f2b92657890a:42587 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-24T18:54:34,338 INFO [RS:0;f2b92657890a:42587 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T18:54:34,339 INFO [RS:0;f2b92657890a:42587 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T18:54:34,339 INFO [RS:0;f2b92657890a:42587 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T18:54:34,339 DEBUG [RS:0;f2b92657890a:42587 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:54:34,339 DEBUG [RS:0;f2b92657890a:42587 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:54:34,339 DEBUG [RS:0;f2b92657890a:42587 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:54:34,339 DEBUG [RS:0;f2b92657890a:42587 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:54:34,339 DEBUG [RS:0;f2b92657890a:42587 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:54:34,339 DEBUG [RS:0;f2b92657890a:42587 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/f2b92657890a:0, corePoolSize=2, maxPoolSize=2 2024-11-24T18:54:34,339 DEBUG [RS:0;f2b92657890a:42587 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:54:34,339 DEBUG [RS:0;f2b92657890a:42587 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:54:34,339 DEBUG [RS:0;f2b92657890a:42587 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:54:34,339 DEBUG [RS:0;f2b92657890a:42587 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:54:34,339 DEBUG [RS:0;f2b92657890a:42587 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:54:34,339 DEBUG [RS:0;f2b92657890a:42587 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:54:34,339 DEBUG [RS:0;f2b92657890a:42587 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/f2b92657890a:0, corePoolSize=3, maxPoolSize=3 2024-11-24T18:54:34,339 DEBUG [RS:0;f2b92657890a:42587 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0, corePoolSize=3, maxPoolSize=3 2024-11-24T18:54:34,340 INFO [RS:0;f2b92657890a:42587 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-24T18:54:34,340 INFO [RS:0;f2b92657890a:42587 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T18:54:34,340 INFO [RS:0;f2b92657890a:42587 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T18:54:34,340 INFO [RS:0;f2b92657890a:42587 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T18:54:34,340 INFO [RS:0;f2b92657890a:42587 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T18:54:34,340 INFO [RS:0;f2b92657890a:42587 {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,42587,1732474473474-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T18:54:34,354 INFO [RS:0;f2b92657890a:42587 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T18:54:34,354 INFO [RS:0;f2b92657890a:42587 {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,42587,1732474473474-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T18:54:34,354 INFO [RS:0;f2b92657890a:42587 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:54:34,354 INFO [RS:0;f2b92657890a:42587 {}] regionserver.Replication(171): f2b92657890a,42587,1732474473474 started 2024-11-24T18:54:34,367 INFO [RS:0;f2b92657890a:42587 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:54:34,367 INFO [RS:0;f2b92657890a:42587 {}] regionserver.HRegionServer(1482): Serving as f2b92657890a,42587,1732474473474, RpcServer on f2b92657890a/172.17.0.2:42587, sessionid=0x1016e3286f40001 2024-11-24T18:54:34,367 DEBUG [RS:0;f2b92657890a:42587 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T18:54:34,367 DEBUG [RS:0;f2b92657890a:42587 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager f2b92657890a,42587,1732474473474 2024-11-24T18:54:34,367 DEBUG [RS:0;f2b92657890a:42587 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f2b92657890a,42587,1732474473474' 2024-11-24T18:54:34,367 DEBUG [RS:0;f2b92657890a:42587 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T18:54:34,368 DEBUG [RS:0;f2b92657890a:42587 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T18:54:34,368 DEBUG [RS:0;f2b92657890a:42587 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T18:54:34,368 DEBUG [RS:0;f2b92657890a:42587 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T18:54:34,368 DEBUG [RS:0;f2b92657890a:42587 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager f2b92657890a,42587,1732474473474 2024-11-24T18:54:34,368 DEBUG [RS:0;f2b92657890a:42587 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f2b92657890a,42587,1732474473474' 2024-11-24T18:54:34,368 DEBUG [RS:0;f2b92657890a:42587 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T18:54:34,369 DEBUG 
[RS:0;f2b92657890a:42587 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T18:54:34,369 DEBUG [RS:0;f2b92657890a:42587 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T18:54:34,369 INFO [RS:0;f2b92657890a:42587 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T18:54:34,369 INFO [RS:0;f2b92657890a:42587 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T18:54:34,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-24T18:54:34,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-24T18:54:34,471 INFO [RS:0;f2b92657890a:42587 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f2b92657890a%2C42587%2C1732474473474, suffix=, logDir=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/WALs/f2b92657890a,42587,1732474473474, archiveDir=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/oldWALs, maxLogs=32 2024-11-24T18:54:34,471 INFO [RS:0;f2b92657890a:42587 {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C42587%2C1732474473474.1732474474471 2024-11-24T18:54:34,477 INFO [RS:0;f2b92657890a:42587 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/WALs/f2b92657890a,42587,1732474473474/f2b92657890a%2C42587%2C1732474473474.1732474474471 2024-11-24T18:54:34,480 DEBUG [RS:0;f2b92657890a:42587 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42865:42865),(127.0.0.1/127.0.0.1:39379:39379)] 2024-11-24T18:54:34,662 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:54:34,663 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T18:54:34,664 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio
1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T18:54:34,664 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:54:34,665 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:54:34,665 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T18:54:34,666 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T18:54:34,666 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:54:34,666 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:54:34,666 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T18:54:34,667 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T18:54:34,667 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:54:34,668 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:54:34,668 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T18:54:34,669 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T18:54:34,669 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:54:34,669 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:54:34,669 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T18:54:34,670 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/hbase/meta/1588230740 2024-11-24T18:54:34,670 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/hbase/meta/1588230740 2024-11-24T18:54:34,672 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T18:54:34,672 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T18:54:34,672 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-24T18:54:34,673 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T18:54:34,675 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T18:54:34,675 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=826393, jitterRate=0.050814151763916016}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T18:54:34,676 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732474474662Initializing all the Stores at 1732474474663 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474474663Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474474663Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474474663Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474474663Cleaning up temporary data from old regions at 1732474474672 (+9 ms)Region opened successfully at 1732474474676 (+4 ms) 2024-11-24T18:54:34,676 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T18:54:34,676 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T18:54:34,676 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T18:54:34,676 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T18:54:34,676 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T18:54:34,677 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T18:54:34,677 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732474474676Disabling compacts and flushes for region at 1732474474676Disabling writes for close at 1732474474676Writing region close 
event to WAL at 1732474474677 (+1 ms)Closed at 1732474474677 2024-11-24T18:54:34,678 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T18:54:34,678 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T18:54:34,679 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T18:54:34,680 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T18:54:34,681 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T18:54:34,831 DEBUG [f2b92657890a:46733 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T18:54:34,832 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=f2b92657890a,42587,1732474473474 2024-11-24T18:54:34,834 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f2b92657890a,42587,1732474473474, state=OPENING 2024-11-24T18:54:34,879 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T18:54:34,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42587-0x1016e3286f40001, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:54:34,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:54:34,890 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T18:54:34,890 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T18:54:34,890 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T18:54:34,891 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=f2b92657890a,42587,1732474473474}] 2024-11-24T18:54:35,046 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T18:54:35,051 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42523, version=4.0.0-alpha-1-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T18:54:35,057 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T18:54:35,057 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T18:54:35,060 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f2b92657890a%2C42587%2C1732474473474.meta, suffix=.meta, logDir=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/WALs/f2b92657890a,42587,1732474473474, archiveDir=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/oldWALs, maxLogs=32 2024-11-24T18:54:35,060 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C42587%2C1732474473474.meta.1732474475060.meta 2024-11-24T18:54:35,066 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/WALs/f2b92657890a,42587,1732474473474/f2b92657890a%2C42587%2C1732474473474.meta.1732474475060.meta 2024-11-24T18:54:35,069 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39379:39379),(127.0.0.1/127.0.0.1:42865:42865)] 2024-11-24T18:54:35,073 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T18:54:35,073 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T18:54:35,073 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T18:54:35,073 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-24T18:54:35,073 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T18:54:35,073 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:54:35,073 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T18:54:35,073 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T18:54:35,075 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T18:54:35,076 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T18:54:35,076 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:54:35,076 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:54:35,076 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T18:54:35,077 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T18:54:35,077 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:54:35,077 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:54:35,077 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T18:54:35,078 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T18:54:35,078 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:54:35,078 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:54:35,078 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T18:54:35,079 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T18:54:35,079 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:54:35,080 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-24T18:54:35,080 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T18:54:35,080 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/hbase/meta/1588230740 2024-11-24T18:54:35,081 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/hbase/meta/1588230740 2024-11-24T18:54:35,082 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T18:54:35,082 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T18:54:35,083 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T18:54:35,084 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T18:54:35,085 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=876193, jitterRate=0.11413705348968506}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T18:54:35,085 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T18:54:35,085 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732474475073Writing region info on filesystem at 1732474475073Initializing all the Stores at 1732474475074 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474475074Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474475075 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474475075Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474475075Cleaning up temporary data from old regions at 1732474475082 (+7 ms)Running coprocessor post-open hooks at 1732474475085 (+3 ms)Region opened successfully at 1732474475085 2024-11-24T18:54:35,086 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732474475046 2024-11-24T18:54:35,088 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T18:54:35,089 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T18:54:35,090 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=f2b92657890a,42587,1732474473474 2024-11-24T18:54:35,090 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f2b92657890a,42587,1732474473474, state=OPEN 2024-11-24T18:54:35,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T18:54:35,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42587-0x1016e3286f40001, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T18:54:35,128 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=f2b92657890a,42587,1732474473474 2024-11-24T18:54:35,128 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T18:54:35,128 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T18:54:35,131 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T18:54:35,132 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=f2b92657890a,42587,1732474473474 in 237 msec 2024-11-24T18:54:35,135 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T18:54:35,135 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 453 msec 2024-11-24T18:54:35,136 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T18:54:35,136 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T18:54:35,137 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T18:54:35,137 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f2b92657890a,42587,1732474473474, seqNum=-1] 2024-11-24T18:54:35,137 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T18:54:35,139 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48533, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T18:54:35,144 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 903 msec 2024-11-24T18:54:35,144 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732474475144, completionTime=-1 2024-11-24T18:54:35,144 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T18:54:35,144 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T18:54:35,146 INFO [master/f2b92657890a:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T18:54:35,146 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732474535146 2024-11-24T18:54:35,146 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732474595146 2024-11-24T18:54:35,146 INFO [master/f2b92657890a:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-24T18:54:35,146 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,46733,1732474473307-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T18:54:35,146 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,46733,1732474473307-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:54:35,146 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,46733,1732474473307-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:54:35,146 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-f2b92657890a:46733, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T18:54:35,147 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T18:54:35,147 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T18:54:35,148 DEBUG [master/f2b92657890a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T18:54:35,150 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.618sec 2024-11-24T18:54:35,150 INFO [master/f2b92657890a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T18:54:35,150 INFO [master/f2b92657890a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T18:54:35,150 INFO [master/f2b92657890a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T18:54:35,150 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T18:54:35,150 INFO [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T18:54:35,150 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,46733,1732474473307-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T18:54:35,150 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,46733,1732474473307-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T18:54:35,153 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T18:54:35,153 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T18:54:35,153 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,46733,1732474473307-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T18:54:35,196 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36963b10, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T18:54:35,196 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request f2b92657890a,46733,-1 for getting cluster id 2024-11-24T18:54:35,196 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T18:54:35,198 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7ceaebf5-ff5e-4036-b2e4-d120e47d2f34' 2024-11-24T18:54:35,198 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T18:54:35,198 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7ceaebf5-ff5e-4036-b2e4-d120e47d2f34" 2024-11-24T18:54:35,199 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63e55293, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T18:54:35,199 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f2b92657890a,46733,-1] 2024-11-24T18:54:35,199 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T18:54:35,199 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:54:35,200 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48392, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T18:54:35,201 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33b8310b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T18:54:35,202 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T18:54:35,202 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f2b92657890a,42587,1732474473474, seqNum=-1] 2024-11-24T18:54:35,203 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T18:54:35,204 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35040, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T18:54:35,205 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=f2b92657890a,46733,1732474473307 2024-11-24T18:54:35,206 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:54:35,208 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T18:54:35,209 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T18:54:35,210 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is f2b92657890a,46733,1732474473307 2024-11-24T18:54:35,210 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@45be962d 2024-11-24T18:54:35,210 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T18:54:35,211 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48408, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T18:54:35,211 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46733 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-24T18:54:35,211 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46733 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-24T18:54:35,212 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46733 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T18:54:35,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46733 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T18:54:35,214 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T18:54:35,214 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:54:35,214 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46733 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-24T18:54:35,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46733 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T18:54:35,216 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T18:54:35,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41449 is added to blk_1073741835_1011 (size=405) 2024-11-24T18:54:35,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741835_1011 (size=405) 2024-11-24T18:54:35,227 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ba3fc07d67f5b648498e4797f6088504, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84 2024-11-24T18:54:35,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41449 is added to blk_1073741836_1012 (size=88) 2024-11-24T18:54:35,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741836_1012 (size=88) 2024-11-24T18:54:35,237 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:54:35,238 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing ba3fc07d67f5b648498e4797f6088504, disabling compactions & flushes 2024-11-24T18:54:35,238 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504. 2024-11-24T18:54:35,238 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504. 2024-11-24T18:54:35,238 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504. after waiting 0 ms 2024-11-24T18:54:35,238 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504. 
2024-11-24T18:54:35,238 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504. 2024-11-24T18:54:35,238 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for ba3fc07d67f5b648498e4797f6088504: Waiting for close lock at 1732474475237Disabling compacts and flushes for region at 1732474475237Disabling writes for close at 1732474475238 (+1 ms)Writing region close event to WAL at 1732474475238Closed at 1732474475238 2024-11-24T18:54:35,239 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T18:54:35,239 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1732474475239"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732474475239"}]},"ts":"1732474475239"} 2024-11-24T18:54:35,241 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-24T18:54:35,242 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T18:54:35,243 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732474475242"}]},"ts":"1732474475242"} 2024-11-24T18:54:35,244 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-24T18:54:35,245 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=ba3fc07d67f5b648498e4797f6088504, ASSIGN}] 2024-11-24T18:54:35,246 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=ba3fc07d67f5b648498e4797f6088504, ASSIGN 2024-11-24T18:54:35,247 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=ba3fc07d67f5b648498e4797f6088504, ASSIGN; state=OFFLINE, location=f2b92657890a,42587,1732474473474; forceNewPlan=false, retain=false 2024-11-24T18:54:35,398 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ba3fc07d67f5b648498e4797f6088504, regionState=OPENING, regionLocation=f2b92657890a,42587,1732474473474 2024-11-24T18:54:35,401 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=ba3fc07d67f5b648498e4797f6088504, ASSIGN because future has completed 2024-11-24T18:54:35,402 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ba3fc07d67f5b648498e4797f6088504, server=f2b92657890a,42587,1732474473474}] 2024-11-24T18:54:35,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:35,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:35,561 INFO [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504. 
2024-11-24T18:54:35,561 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => ba3fc07d67f5b648498e4797f6088504, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504.', STARTKEY => '', ENDKEY => ''} 2024-11-24T18:54:35,561 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling ba3fc07d67f5b648498e4797f6088504 2024-11-24T18:54:35,561 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:54:35,561 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for ba3fc07d67f5b648498e4797f6088504 2024-11-24T18:54:35,561 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for ba3fc07d67f5b648498e4797f6088504 2024-11-24T18:54:35,562 INFO [StoreOpener-ba3fc07d67f5b648498e4797f6088504-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ba3fc07d67f5b648498e4797f6088504 2024-11-24T18:54:35,564 INFO [StoreOpener-ba3fc07d67f5b648498e4797f6088504-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ba3fc07d67f5b648498e4797f6088504 columnFamilyName info 2024-11-24T18:54:35,564 DEBUG [StoreOpener-ba3fc07d67f5b648498e4797f6088504-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:54:35,564 INFO [StoreOpener-ba3fc07d67f5b648498e4797f6088504-1 {}] regionserver.HStore(327): Store=ba3fc07d67f5b648498e4797f6088504/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T18:54:35,565 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for ba3fc07d67f5b648498e4797f6088504 2024-11-24T18:54:35,565 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504 2024-11-24T18:54:35,566 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504 2024-11-24T18:54:35,566 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for ba3fc07d67f5b648498e4797f6088504 2024-11-24T18:54:35,566 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for ba3fc07d67f5b648498e4797f6088504 2024-11-24T18:54:35,568 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for ba3fc07d67f5b648498e4797f6088504 2024-11-24T18:54:35,570 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T18:54:35,570 INFO [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened ba3fc07d67f5b648498e4797f6088504; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=857108, jitterRate=0.0898701399564743}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T18:54:35,570 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ba3fc07d67f5b648498e4797f6088504 2024-11-24T18:54:35,571 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for ba3fc07d67f5b648498e4797f6088504: Running coprocessor pre-open hook at 1732474475561Writing region info on filesystem at 1732474475561Initializing all the Stores at 1732474475562 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474475562Cleaning up temporary data from old regions at 1732474475566 (+4 ms)Running coprocessor post-open hooks at 1732474475570 (+4 ms)Region opened successfully at 1732474475571 (+1 ms) 2024-11-24T18:54:35,572 INFO [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504., pid=6, masterSystemTime=1732474475557 2024-11-24T18:54:35,574 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy 
task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504. 2024-11-24T18:54:35,574 INFO [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504. 2024-11-24T18:54:35,575 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ba3fc07d67f5b648498e4797f6088504, regionState=OPEN, openSeqNum=2, regionLocation=f2b92657890a,42587,1732474473474 2024-11-24T18:54:35,577 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ba3fc07d67f5b648498e4797f6088504, server=f2b92657890a,42587,1732474473474 because future has completed 2024-11-24T18:54:35,580 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T18:54:35,581 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure ba3fc07d67f5b648498e4797f6088504, server=f2b92657890a,42587,1732474473474 in 176 msec 2024-11-24T18:54:35,584 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T18:54:35,584 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=ba3fc07d67f5b648498e4797f6088504, ASSIGN in 336 msec 2024-11-24T18:54:35,585 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T18:54:35,585 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732474475585"}]},"ts":"1732474475585"} 2024-11-24T18:54:35,587 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-24T18:54:35,588 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T18:54:35,590 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 376 msec 2024-11-24T18:54:36,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:36,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:37,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:37,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:38,327 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T18:54:38,327 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-24T18:54:38,328 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T18:54:38,328 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-24T18:54:38,328 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-24T18:54:38,328 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-24T18:54:38,329 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T18:54:38,329 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-24T18:54:38,433 WARN 
[Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:38,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:39,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:54:39,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:40,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:40,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:54:40,577 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T18:54:40,578 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:40,578 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:40,578 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:40,578 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:40,578 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:40,579 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:40,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:40,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:40,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:40,603 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:40,603 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:40,604 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:40,610 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:40,611 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:40,611 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:40,615 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:54:40,623 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T18:54:40,623 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-24T18:54:41,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:41,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:42,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:42,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:43,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:43,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:44,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:44,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:45,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46733 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T18:54:45,283 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-24T18:54:45,283 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-24T18:54:45,286 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T18:54:45,286 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504. 
2024-11-24T18:54:45,291 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504., hostname=f2b92657890a,42587,1732474473474, seqNum=2] 2024-11-24T18:54:45,300 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46733 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T18:54:45,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46733 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T18:54:45,308 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-24T18:54:45,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46733 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-24T18:54:45,309 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-24T18:54:45,311 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-24T18:54:45,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:45,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:54:45,473 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42587 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-24T18:54:45,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504. 2024-11-24T18:54:45,474 INFO [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing ba3fc07d67f5b648498e4797f6088504 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-24T18:54:45,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/.tmp/info/a00f16ec2ebf4556aedcc4b9c4651f11 is 1080, key is row0001/info:/1732474485293/Put/seqid=0 2024-11-24T18:54:45,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41449 is added to blk_1073741837_1013 (size=6033) 2024-11-24T18:54:45,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741837_1013 (size=6033) 2024-11-24T18:54:45,902 INFO [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/.tmp/info/a00f16ec2ebf4556aedcc4b9c4651f11 2024-11-24T18:54:45,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/.tmp/info/a00f16ec2ebf4556aedcc4b9c4651f11 as hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/info/a00f16ec2ebf4556aedcc4b9c4651f11 2024-11-24T18:54:45,917 INFO [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/info/a00f16ec2ebf4556aedcc4b9c4651f11, entries=1, sequenceid=5, filesize=5.9 K 2024-11-24T18:54:45,919 INFO [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for ba3fc07d67f5b648498e4797f6088504 in 445ms, sequenceid=5, compaction requested=false 2024-11-24T18:54:45,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush 
status journal for ba3fc07d67f5b648498e4797f6088504: 2024-11-24T18:54:45,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504. 2024-11-24T18:54:45,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-24T18:54:45,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46733 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-24T18:54:45,926 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-24T18:54:45,926 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 613 msec 2024-11-24T18:54:45,929 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 625 msec 2024-11-24T18:54:46,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:46,463 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:47,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:47,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:48,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:48,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:49,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:49,466 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:50,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:50,467 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:51,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:51,468 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:52,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:52,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:53,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:53,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 after 68065ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:54:53,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:54:53,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta after 68054ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T18:54:54,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:54,471 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:54:55,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46733 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-24T18:54:55,322 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-24T18:54:55,326 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46733 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T18:54:55,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46733 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T18:54:55,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46733 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-24T18:54:55,364 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-24T18:54:55,365 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-24T18:54:55,365 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-24T18:54:55,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:55,471 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:54:55,519 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42587 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-24T18:54:55,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504. 2024-11-24T18:54:55,519 INFO [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing ba3fc07d67f5b648498e4797f6088504 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-24T18:54:55,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/.tmp/info/47f0defbdeb041969c732300c0050207 is 1080, key is row0002/info:/1732474495324/Put/seqid=0 2024-11-24T18:54:55,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741838_1014 (size=6033) 2024-11-24T18:54:55,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41449 is added to blk_1073741838_1014 (size=6033) 2024-11-24T18:54:55,531 INFO [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/.tmp/info/47f0defbdeb041969c732300c0050207 2024-11-24T18:54:55,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/.tmp/info/47f0defbdeb041969c732300c0050207 as hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/info/47f0defbdeb041969c732300c0050207 2024-11-24T18:54:55,545 INFO [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/info/47f0defbdeb041969c732300c0050207, entries=1, sequenceid=9, filesize=5.9 K 2024-11-24T18:54:55,546 INFO [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for ba3fc07d67f5b648498e4797f6088504 in 27ms, sequenceid=9, compaction requested=false 2024-11-24T18:54:55,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): 
Flush status journal for ba3fc07d67f5b648498e4797f6088504: 2024-11-24T18:54:55,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504. 2024-11-24T18:54:55,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-24T18:54:55,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46733 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-24T18:54:55,551 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-24T18:54:55,551 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 183 msec 2024-11-24T18:54:55,554 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 226 msec 2024-11-24T18:54:56,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:56,472 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:57,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:57,473 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:58,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:58,473 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:59,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:54:59,474 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:00,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:00,475 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:01,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:01,475 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:02,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:02,476 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:03,262 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T18:55:03,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:03,477 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:04,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:04,477 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:55:05,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46733 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-24T18:55:05,382 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-24T18:55:05,385 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C42587%2C1732474473474.1732474505385 2024-11-24T18:55:05,412 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:05,412 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:05,413 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:05,413 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:05,413 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:05,413 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/WALs/f2b92657890a,42587,1732474473474/f2b92657890a%2C42587%2C1732474473474.1732474474471 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/WALs/f2b92657890a,42587,1732474473474/f2b92657890a%2C42587%2C1732474473474.1732474505385 2024-11-24T18:55:05,414 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39379:39379),(127.0.0.1/127.0.0.1:42865:42865)] 2024-11-24T18:55:05,415 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/WALs/f2b92657890a,42587,1732474473474/f2b92657890a%2C42587%2C1732474473474.1732474474471 is not closed yet, will try archiving it next time 2024-11-24T18:55:05,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41449 is added to blk_1073741833_1009 (size=5546) 2024-11-24T18:55:05,416 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46733 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T18:55:05,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741833_1009 (size=5546) 2024-11-24T18:55:05,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46733 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T18:55:05,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46733 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-24T18:55:05,419 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-24T18:55:05,421 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 
2024-11-24T18:55:05,421 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-24T18:55:05,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:05,478 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:05,574 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42587 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-24T18:55:05,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504. 
2024-11-24T18:55:05,574 INFO [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing ba3fc07d67f5b648498e4797f6088504 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-24T18:55:05,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/.tmp/info/1e3361e7130946479940af977a14ab5a is 1080, key is row0003/info:/1732474505383/Put/seqid=0 2024-11-24T18:55:05,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741840_1016 (size=6033) 2024-11-24T18:55:05,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41449 is added to blk_1073741840_1016 (size=6033) 2024-11-24T18:55:05,584 INFO [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/.tmp/info/1e3361e7130946479940af977a14ab5a 2024-11-24T18:55:05,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/.tmp/info/1e3361e7130946479940af977a14ab5a as hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/info/1e3361e7130946479940af977a14ab5a 2024-11-24T18:55:05,602 INFO [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/info/1e3361e7130946479940af977a14ab5a, entries=1, sequenceid=13, filesize=5.9 K 2024-11-24T18:55:05,603 INFO [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for ba3fc07d67f5b648498e4797f6088504 in 29ms, sequenceid=13, compaction requested=true 2024-11-24T18:55:05,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for ba3fc07d67f5b648498e4797f6088504: 2024-11-24T18:55:05,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504. 
2024-11-24T18:55:05,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-24T18:55:05,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46733 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-24T18:55:05,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-24T18:55:05,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 184 msec 2024-11-24T18:55:05,610 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 192 msec 2024-11-24T18:55:06,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:55:06,479 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:07,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:07,479 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:55:08,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:08,480 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:09,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:55:09,481 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:10,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:10,481 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:55:11,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:11,482 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:12,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:55:12,482 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:13,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:13,483 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:55:14,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:14,484 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:15,153 INFO [master/f2b92657890a:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-24T18:55:15,154 INFO [master/f2b92657890a:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-24T18:55:15,463 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:15,484 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:55:15,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46733 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-24T18:55:15,512 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-24T18:55:15,512 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T18:55:15,513 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T18:55:15,513 DEBUG [Time-limited test {}] regionserver.HStore(1541): ba3fc07d67f5b648498e4797f6088504/info is initiating minor compaction (all files) 2024-11-24T18:55:15,514 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T18:55:15,514 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T18:55:15,514 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of ba3fc07d67f5b648498e4797f6088504/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504. 2024-11-24T18:55:15,514 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/info/a00f16ec2ebf4556aedcc4b9c4651f11, hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/info/47f0defbdeb041969c732300c0050207, hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/info/1e3361e7130946479940af977a14ab5a] into tmpdir=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/.tmp, totalSize=17.7 K 2024-11-24T18:55:15,514 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting a00f16ec2ebf4556aedcc4b9c4651f11, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1732474485293 2024-11-24T18:55:15,515 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 47f0defbdeb041969c732300c0050207, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1732474495324 2024-11-24T18:55:15,515 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 1e3361e7130946479940af977a14ab5a, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732474505383 2024-11-24T18:55:15,535 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): ba3fc07d67f5b648498e4797f6088504#info#compaction#45 average throughput is unlimited, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T18:55:15,535 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/.tmp/info/7787324066b2430b96bad4cf20ca033c is 1080, key is row0001/info:/1732474485293/Put/seqid=0 2024-11-24T18:55:15,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741841_1017 (size=8296) 2024-11-24T18:55:15,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41449 is added to blk_1073741841_1017 (size=8296) 2024-11-24T18:55:15,547 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/.tmp/info/7787324066b2430b96bad4cf20ca033c as hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/info/7787324066b2430b96bad4cf20ca033c 2024-11-24T18:55:15,554 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ba3fc07d67f5b648498e4797f6088504/info of ba3fc07d67f5b648498e4797f6088504 into 7787324066b2430b96bad4cf20ca033c(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T18:55:15,554 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for ba3fc07d67f5b648498e4797f6088504: 2024-11-24T18:55:15,556 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C42587%2C1732474473474.1732474515556 2024-11-24T18:55:15,562 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:15,562 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:15,562 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:15,562 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:15,563 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:15,563 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/WALs/f2b92657890a,42587,1732474473474/f2b92657890a%2C42587%2C1732474473474.1732474505385 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/WALs/f2b92657890a,42587,1732474473474/f2b92657890a%2C42587%2C1732474473474.1732474515556 2024-11-24T18:55:15,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741839_1015 (size=2520) 2024-11-24T18:55:15,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41449 is added to blk_1073741839_1015 (size=2520) 2024-11-24T18:55:15,568 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/WALs/f2b92657890a,42587,1732474473474/f2b92657890a%2C42587%2C1732474473474.1732474474471 to 
hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/oldWALs/f2b92657890a%2C42587%2C1732474473474.1732474474471 2024-11-24T18:55:15,569 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39379:39379),(127.0.0.1/127.0.0.1:42865:42865)] 2024-11-24T18:55:15,570 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46733 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T18:55:15,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46733 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T18:55:15,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46733 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-24T18:55:15,572 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-24T18:55:15,573 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-24T18:55:15,573 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-24T18:55:15,726 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42587 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-24T18:55:15,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504. 
2024-11-24T18:55:15,727 INFO [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing ba3fc07d67f5b648498e4797f6088504 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-24T18:55:15,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/.tmp/info/9ae8517782a143649ed38040dbb0783e is 1080, key is row0000/info:/1732474515555/Put/seqid=0 2024-11-24T18:55:15,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41449 is added to blk_1073741843_1019 (size=6033) 2024-11-24T18:55:15,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741843_1019 (size=6033) 2024-11-24T18:55:15,741 INFO [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/.tmp/info/9ae8517782a143649ed38040dbb0783e 2024-11-24T18:55:15,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/.tmp/info/9ae8517782a143649ed38040dbb0783e as hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/info/9ae8517782a143649ed38040dbb0783e 2024-11-24T18:55:15,754 INFO [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/info/9ae8517782a143649ed38040dbb0783e, entries=1, sequenceid=18, filesize=5.9 K 2024-11-24T18:55:15,755 INFO [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for ba3fc07d67f5b648498e4797f6088504 in 28ms, sequenceid=18, compaction requested=false 2024-11-24T18:55:15,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for ba3fc07d67f5b648498e4797f6088504: 2024-11-24T18:55:15,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504. 
2024-11-24T18:55:15,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-24T18:55:15,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46733 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-24T18:55:15,760 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-24T18:55:15,760 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 184 msec 2024-11-24T18:55:15,763 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 191 msec 2024-11-24T18:55:16,463 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:55:16,485 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:17,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:17,485 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:55:18,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:18,486 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:19,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:55:19,486 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:20,466 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:20,487 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:55:20,561 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ba3fc07d67f5b648498e4797f6088504, had cached 0 bytes from a total of 14329 2024-11-24T18:55:21,467 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:21,487 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:22,467 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:22,488 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:23,468 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:23,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:24,468 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:24,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:25,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:25,490 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:55:25,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46733 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-24T18:55:25,623 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-24T18:55:25,627 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C42587%2C1732474473474.1732474525627 2024-11-24T18:55:25,635 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:25,635 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:25,636 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:25,636 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:25,636 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:25,636 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/WALs/f2b92657890a,42587,1732474473474/f2b92657890a%2C42587%2C1732474473474.1732474515556 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/WALs/f2b92657890a,42587,1732474473474/f2b92657890a%2C42587%2C1732474473474.1732474525627 2024-11-24T18:55:25,637 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39379:39379),(127.0.0.1/127.0.0.1:42865:42865)] 2024-11-24T18:55:25,637 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/WALs/f2b92657890a,42587,1732474473474/f2b92657890a%2C42587%2C1732474473474.1732474515556 is not closed yet, will try archiving it next time 2024-11-24T18:55:25,637 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/WALs/f2b92657890a,42587,1732474473474/f2b92657890a%2C42587%2C1732474473474.1732474505385 to hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/oldWALs/f2b92657890a%2C42587%2C1732474473474.1732474505385 2024-11-24T18:55:25,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T18:55:25,637 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-24T18:55:25,637 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T18:55:25,637 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:55:25,638 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:55:25,638 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T18:55:25,638 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1258462092, stopped=false 2024-11-24T18:55:25,638 
INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-24T18:55:25,638 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=f2b92657890a,46733,1732474473307 2024-11-24T18:55:25,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41449 is added to blk_1073741842_1018 (size=2026) 2024-11-24T18:55:25,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741842_1018 (size=2026) 2024-11-24T18:55:25,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T18:55:25,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42587-0x1016e3286f40001, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T18:55:25,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:55:25,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42587-0x1016e3286f40001, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:55:25,655 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T18:55:25,656 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-24T18:55:25,656 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T18:55:25,656 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T18:55:25,656 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:55:25,656 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42587-0x1016e3286f40001, quorum=127.0.0.1:55864, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T18:55:25,656 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'f2b92657890a,42587,1732474473474' ***** 2024-11-24T18:55:25,656 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T18:55:25,656 INFO [RS:0;f2b92657890a:42587 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T18:55:25,656 INFO [RS:0;f2b92657890a:42587 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T18:55:25,656 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T18:55:25,656 INFO [RS:0;f2b92657890a:42587 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T18:55:25,656 INFO [RS:0;f2b92657890a:42587 {}] regionserver.HRegionServer(3091): Received CLOSE for ba3fc07d67f5b648498e4797f6088504 2024-11-24T18:55:25,657 INFO [RS:0;f2b92657890a:42587 {}] regionserver.HRegionServer(959): stopping server f2b92657890a,42587,1732474473474 2024-11-24T18:55:25,657 INFO [RS:0;f2b92657890a:42587 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T18:55:25,657 INFO [RS:0;f2b92657890a:42587 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;f2b92657890a:42587. 2024-11-24T18:55:25,657 DEBUG [RS:0;f2b92657890a:42587 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T18:55:25,657 DEBUG [RS:0;f2b92657890a:42587 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:55:25,657 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing ba3fc07d67f5b648498e4797f6088504, disabling compactions & flushes 2024-11-24T18:55:25,657 INFO [RS:0;f2b92657890a:42587 {}] 
regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T18:55:25,657 INFO [RS:0;f2b92657890a:42587 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T18:55:25,657 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504. 2024-11-24T18:55:25,657 INFO [RS:0;f2b92657890a:42587 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T18:55:25,657 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504. 2024-11-24T18:55:25,657 INFO [RS:0;f2b92657890a:42587 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T18:55:25,657 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504. after waiting 0 ms 2024-11-24T18:55:25,657 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504. 2024-11-24T18:55:25,657 INFO [RS:0;f2b92657890a:42587 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-24T18:55:25,657 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing ba3fc07d67f5b648498e4797f6088504 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-24T18:55:25,657 DEBUG [RS:0;f2b92657890a:42587 {}] regionserver.HRegionServer(1325): Online Regions={ba3fc07d67f5b648498e4797f6088504=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504., 1588230740=hbase:meta,,1.1588230740} 2024-11-24T18:55:25,657 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T18:55:25,657 DEBUG [RS:0;f2b92657890a:42587 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, ba3fc07d67f5b648498e4797f6088504 2024-11-24T18:55:25,657 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T18:55:25,657 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T18:55:25,657 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T18:55:25,657 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T18:55:25,657 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-24T18:55:25,661 DEBUG 
[RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/.tmp/info/4396f33167954943a8d3b1ba61b4095c is 1080, key is row0001/info:/1732474525624/Put/seqid=0 2024-11-24T18:55:25,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41449 is added to blk_1073741845_1021 (size=6033) 2024-11-24T18:55:25,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741845_1021 (size=6033) 2024-11-24T18:55:25,665 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/.tmp/info/4396f33167954943a8d3b1ba61b4095c 2024-11-24T18:55:25,672 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/.tmp/info/4396f33167954943a8d3b1ba61b4095c as hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/info/4396f33167954943a8d3b1ba61b4095c 2024-11-24T18:55:25,674 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/hbase/meta/1588230740/.tmp/info/62677e1440a4480bb4d6989c2451529d is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504./info:regioninfo/1732474475575/Put/seqid=0 2024-11-24T18:55:25,677 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/info/4396f33167954943a8d3b1ba61b4095c, entries=1, sequenceid=22, filesize=5.9 K 2024-11-24T18:55:25,678 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for ba3fc07d67f5b648498e4797f6088504 in 21ms, sequenceid=22, compaction requested=true 2024-11-24T18:55:25,679 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/info/a00f16ec2ebf4556aedcc4b9c4651f11, 
hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/info/47f0defbdeb041969c732300c0050207, hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/info/1e3361e7130946479940af977a14ab5a] to archive 2024-11-24T18:55:25,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41449 is added to blk_1073741846_1022 (size=7308) 2024-11-24T18:55:25,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741846_1022 (size=7308) 2024-11-24T18:55:25,680 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-24T18:55:25,680 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/hbase/meta/1588230740/.tmp/info/62677e1440a4480bb4d6989c2451529d 2024-11-24T18:55:25,681 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/info/a00f16ec2ebf4556aedcc4b9c4651f11 to hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/info/a00f16ec2ebf4556aedcc4b9c4651f11 2024-11-24T18:55:25,682 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/info/47f0defbdeb041969c732300c0050207 to hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/info/47f0defbdeb041969c732300c0050207 2024-11-24T18:55:25,683 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/info/1e3361e7130946479940af977a14ab5a to hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/info/1e3361e7130946479940af977a14ab5a 2024-11-24T18:55:25,684 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504.-1 {}] 
regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=f2b92657890a:46733 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-24T18:55:25,684 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [a00f16ec2ebf4556aedcc4b9c4651f11=6033, 47f0defbdeb041969c732300c0050207=6033, 1e3361e7130946479940af977a14ab5a=6033] 2024-11-24T18:55:25,687 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ba3fc07d67f5b648498e4797f6088504/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-24T18:55:25,688 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504. 2024-11-24T18:55:25,688 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for ba3fc07d67f5b648498e4797f6088504: Waiting for close lock at 1732474525657Running coprocessor pre-close hooks at 1732474525657Disabling compacts and flushes for region at 1732474525657Disabling writes for close at 1732474525657Obtaining lock to block concurrent updates at 1732474525657Preparing flush snapshotting stores in ba3fc07d67f5b648498e4797f6088504 at 1732474525657Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1732474525657Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504. at 1732474525658 (+1 ms)Flushing ba3fc07d67f5b648498e4797f6088504/info: creating writer at 1732474525658Flushing ba3fc07d67f5b648498e4797f6088504/info: appending metadata at 1732474525660 (+2 ms)Flushing ba3fc07d67f5b648498e4797f6088504/info: closing flushed file at 1732474525660Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@14bdb0cb: reopening flushed file at 1732474525672 (+12 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for ba3fc07d67f5b648498e4797f6088504 in 21ms, sequenceid=22, compaction requested=true at 1732474525678 (+6 ms)Writing region close event to WAL at 1732474525685 (+7 ms)Running coprocessor post-close hooks at 1732474525688 (+3 ms)Closed at 1732474525688 2024-11-24T18:55:25,688 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732474475211.ba3fc07d67f5b648498e4797f6088504. 
2024-11-24T18:55:25,699 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/hbase/meta/1588230740/.tmp/ns/c32a7a7a0fe04875b726d4818d532f65 is 43, key is default/ns:d/1732474475139/Put/seqid=0 2024-11-24T18:55:25,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41449 is added to blk_1073741847_1023 (size=5153) 2024-11-24T18:55:25,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741847_1023 (size=5153) 2024-11-24T18:55:25,704 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/hbase/meta/1588230740/.tmp/ns/c32a7a7a0fe04875b726d4818d532f65 2024-11-24T18:55:25,723 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/hbase/meta/1588230740/.tmp/table/8d7b45850e9d4f4b9472c7f0d5ad0d17 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1732474475585/Put/seqid=0 2024-11-24T18:55:25,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741848_1024 (size=5508) 2024-11-24T18:55:25,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41449 is added to blk_1073741848_1024 (size=5508) 2024-11-24T18:55:25,728 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/hbase/meta/1588230740/.tmp/table/8d7b45850e9d4f4b9472c7f0d5ad0d17 2024-11-24T18:55:25,732 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/hbase/meta/1588230740/.tmp/info/62677e1440a4480bb4d6989c2451529d as hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/hbase/meta/1588230740/info/62677e1440a4480bb4d6989c2451529d 2024-11-24T18:55:25,738 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/hbase/meta/1588230740/info/62677e1440a4480bb4d6989c2451529d, entries=10, sequenceid=11, filesize=7.1 K 2024-11-24T18:55:25,739 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/hbase/meta/1588230740/.tmp/ns/c32a7a7a0fe04875b726d4818d532f65 as hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/hbase/meta/1588230740/ns/c32a7a7a0fe04875b726d4818d532f65 2024-11-24T18:55:25,745 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/hbase/meta/1588230740/ns/c32a7a7a0fe04875b726d4818d532f65, entries=2, sequenceid=11, filesize=5.0 K 2024-11-24T18:55:25,746 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/hbase/meta/1588230740/.tmp/table/8d7b45850e9d4f4b9472c7f0d5ad0d17 as hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/hbase/meta/1588230740/table/8d7b45850e9d4f4b9472c7f0d5ad0d17 2024-11-24T18:55:25,751 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/hbase/meta/1588230740/table/8d7b45850e9d4f4b9472c7f0d5ad0d17, entries=2, sequenceid=11, filesize=5.4 K 2024-11-24T18:55:25,753 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 96ms, sequenceid=11, compaction requested=false 2024-11-24T18:55:25,758 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-24T18:55:25,758 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T18:55:25,758 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T18:55:25,758 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732474525657Running coprocessor pre-close hooks at 1732474525657Disabling compacts and flushes for region at 1732474525657Disabling writes for close at 1732474525657Obtaining lock to block concurrent updates at 1732474525657Preparing flush snapshotting stores in 1588230740 at 1732474525657Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1732474525658 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732474525658Flushing 1588230740/info: creating writer at 1732474525658Flushing 1588230740/info: appending metadata at 1732474525674 (+16 ms)Flushing 1588230740/info: closing flushed file at 1732474525674Flushing 1588230740/ns: creating writer at 1732474525685 (+11 ms)Flushing 1588230740/ns: appending metadata at 1732474525699 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732474525699Flushing 1588230740/table: creating writer at 1732474525708 (+9 ms)Flushing 1588230740/table: appending metadata at 1732474525722 (+14 ms)Flushing 1588230740/table: closing flushed file at 1732474525722Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2734cd34: reopening flushed file at 1732474525731 (+9 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7f12a47b: reopening flushed file at 1732474525738 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2bc54838: reopening flushed file at 1732474525745 (+7 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 96ms, sequenceid=11, compaction requested=false at 1732474525753 (+8 ms)Writing region close event to WAL at 1732474525754 (+1 ms)Running coprocessor post-close hooks at 1732474525758 (+4 ms)Closed at 1732474525758 2024-11-24T18:55:25,758 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T18:55:25,857 INFO [RS:0;f2b92657890a:42587 {}] regionserver.HRegionServer(976): stopping server f2b92657890a,42587,1732474473474; all regions closed. 2024-11-24T18:55:25,858 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:25,859 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:25,859 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:25,859 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:25,859 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:25,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741834_1010 (size=3306) 2024-11-24T18:55:25,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41449 is added to blk_1073741834_1010 (size=3306) 2024-11-24T18:55:25,869 DEBUG [RS:0;f2b92657890a:42587 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/oldWALs 2024-11-24T18:55:25,869 INFO [RS:0;f2b92657890a:42587 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f2b92657890a%2C42587%2C1732474473474.meta:.meta(num 1732474475060) 2024-11-24T18:55:25,870 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:25,870 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:25,870 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:25,870 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:25,870 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:25,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741844_1020 (size=1252) 2024-11-24T18:55:25,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41449 is added to blk_1073741844_1020 (size=1252) 2024-11-24T18:55:26,039 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/WALs/f2b92657890a,42587,1732474473474/f2b92657890a%2C42587%2C1732474473474.1732474515556 to hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/oldWALs/f2b92657890a%2C42587%2C1732474473474.1732474515556 2024-11-24T18:55:26,042 DEBUG [RS:0;f2b92657890a:42587 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/oldWALs 2024-11-24T18:55:26,042 INFO [RS:0;f2b92657890a:42587 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f2b92657890a%2C42587%2C1732474473474:(num 1732474525627) 2024-11-24T18:55:26,043 DEBUG [RS:0;f2b92657890a:42587 {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:55:26,043 INFO [RS:0;f2b92657890a:42587 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T18:55:26,043 INFO [RS:0;f2b92657890a:42587 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T18:55:26,043 INFO [RS:0;f2b92657890a:42587 {}] hbase.ChoreService(370): Chore service for: regionserver/f2b92657890a:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-24T18:55:26,043 INFO [RS:0;f2b92657890a:42587 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T18:55:26,043 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T18:55:26,043 INFO [RS:0;f2b92657890a:42587 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42587 2024-11-24T18:55:26,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T18:55:26,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42587-0x1016e3286f40001, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/f2b92657890a,42587,1732474473474 2024-11-24T18:55:26,097 INFO [RS:0;f2b92657890a:42587 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T18:55:26,098 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [f2b92657890a,42587,1732474473474] 2024-11-24T18:55:26,118 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/f2b92657890a,42587,1732474473474 already deleted, retry=false 2024-11-24T18:55:26,118 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; f2b92657890a,42587,1732474473474 expired; onlineServers=0 2024-11-24T18:55:26,118 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'f2b92657890a,46733,1732474473307' ***** 2024-11-24T18:55:26,118 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T18:55:26,118 INFO [M:0;f2b92657890a:46733 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T18:55:26,118 INFO [M:0;f2b92657890a:46733 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T18:55:26,118 DEBUG [M:0;f2b92657890a:46733 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T18:55:26,119 DEBUG [M:0;f2b92657890a:46733 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T18:55:26,119 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T18:55:26,119 DEBUG [master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.small.0-1732474474245 {}] cleaner.HFileCleaner(306): Exit Thread[master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.small.0-1732474474245,5,FailOnTimeoutGroup] 2024-11-24T18:55:26,119 DEBUG [master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.large.0-1732474474245 {}] cleaner.HFileCleaner(306): Exit Thread[master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.large.0-1732474474245,5,FailOnTimeoutGroup] 2024-11-24T18:55:26,119 INFO [M:0;f2b92657890a:46733 {}] hbase.ChoreService(370): Chore service for: master/f2b92657890a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T18:55:26,119 INFO [M:0;f2b92657890a:46733 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T18:55:26,119 DEBUG [M:0;f2b92657890a:46733 {}] master.HMaster(1795): Stopping service threads 2024-11-24T18:55:26,119 INFO [M:0;f2b92657890a:46733 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T18:55:26,119 INFO [M:0;f2b92657890a:46733 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T18:55:26,119 INFO [M:0;f2b92657890a:46733 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T18:55:26,119 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T18:55:26,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T18:55:26,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:55:26,139 DEBUG [M:0;f2b92657890a:46733 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-24T18:55:26,139 DEBUG [M:0;f2b92657890a:46733 {}] master.ActiveMasterManager(353): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-24T18:55:26,140 INFO [M:0;f2b92657890a:46733 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/.lastflushedseqids 2024-11-24T18:55:26,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41449 is added to blk_1073741849_1025 (size=130) 2024-11-24T18:55:26,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741849_1025 (size=130) 2024-11-24T18:55:26,146 INFO [M:0;f2b92657890a:46733 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T18:55:26,146 INFO [M:0;f2b92657890a:46733 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T18:55:26,146 DEBUG [M:0;f2b92657890a:46733 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T18:55:26,146 INFO [M:0;f2b92657890a:46733 {}] regionserver.HRegion(1755): Closing region 
master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:55:26,146 DEBUG [M:0;f2b92657890a:46733 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:55:26,146 DEBUG [M:0;f2b92657890a:46733 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T18:55:26,146 DEBUG [M:0;f2b92657890a:46733 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:55:26,146 INFO [M:0;f2b92657890a:46733 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.59 KB heapSize=55 KB 2024-11-24T18:55:26,163 DEBUG [M:0;f2b92657890a:46733 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8676328a98d849d29b997e7121ce51dd is 82, key is hbase:meta,,1/info:regioninfo/1732474475089/Put/seqid=0 2024-11-24T18:55:26,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741850_1026 (size=5672) 2024-11-24T18:55:26,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41449 is added to blk_1073741850_1026 (size=5672) 2024-11-24T18:55:26,168 INFO [M:0;f2b92657890a:46733 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8676328a98d849d29b997e7121ce51dd 2024-11-24T18:55:26,190 DEBUG [M:0;f2b92657890a:46733 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c1be4eac2b8e40b690ecaf27bb4a6c4a is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732474475589/Put/seqid=0 2024-11-24T18:55:26,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741851_1027 (size=7823) 2024-11-24T18:55:26,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41449 is added to blk_1073741851_1027 (size=7823) 2024-11-24T18:55:26,204 INFO [M:0;f2b92657890a:46733 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.99 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c1be4eac2b8e40b690ecaf27bb4a6c4a 2024-11-24T18:55:26,208 INFO [RS:0;f2b92657890a:42587 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T18:55:26,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42587-0x1016e3286f40001, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T18:55:26,208 INFO [RS:0;f2b92657890a:42587 {}] regionserver.HRegionServer(1031): Exiting; stopping=f2b92657890a,42587,1732474473474; zookeeper connection closed. 
2024-11-24T18:55:26,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42587-0x1016e3286f40001, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T18:55:26,209 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@35889d81 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@35889d81 2024-11-24T18:55:26,209 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T18:55:26,210 INFO [M:0;f2b92657890a:46733 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c1be4eac2b8e40b690ecaf27bb4a6c4a 2024-11-24T18:55:26,227 DEBUG [M:0;f2b92657890a:46733 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/689ba9e6574d47e4b1560dbe7802c594 is 69, key is f2b92657890a,42587,1732474473474/rs:state/1732474474325/Put/seqid=0 2024-11-24T18:55:26,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741852_1028 (size=5156) 2024-11-24T18:55:26,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41449 is added to blk_1073741852_1028 (size=5156) 2024-11-24T18:55:26,233 INFO [M:0;f2b92657890a:46733 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/689ba9e6574d47e4b1560dbe7802c594 2024-11-24T18:55:26,256 DEBUG [M:0;f2b92657890a:46733 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3da4636edb6945d18ed3828216871560 is 52, key is load_balancer_on/state:d/1732474475207/Put/seqid=0 2024-11-24T18:55:26,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41449 is added to blk_1073741853_1029 (size=5056) 2024-11-24T18:55:26,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741853_1029 (size=5056) 2024-11-24T18:55:26,261 INFO [M:0;f2b92657890a:46733 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3da4636edb6945d18ed3828216871560 2024-11-24T18:55:26,269 DEBUG [M:0;f2b92657890a:46733 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8676328a98d849d29b997e7121ce51dd as hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8676328a98d849d29b997e7121ce51dd 2024-11-24T18:55:26,275 INFO [M:0;f2b92657890a:46733 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8676328a98d849d29b997e7121ce51dd, entries=8, sequenceid=121, filesize=5.5 K 2024-11-24T18:55:26,276 DEBUG [M:0;f2b92657890a:46733 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c1be4eac2b8e40b690ecaf27bb4a6c4a as hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c1be4eac2b8e40b690ecaf27bb4a6c4a 2024-11-24T18:55:26,281 INFO [M:0;f2b92657890a:46733 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c1be4eac2b8e40b690ecaf27bb4a6c4a 2024-11-24T18:55:26,281 INFO [M:0;f2b92657890a:46733 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c1be4eac2b8e40b690ecaf27bb4a6c4a, entries=14, sequenceid=121, filesize=7.6 K 2024-11-24T18:55:26,283 DEBUG [M:0;f2b92657890a:46733 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/689ba9e6574d47e4b1560dbe7802c594 as hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/689ba9e6574d47e4b1560dbe7802c594 2024-11-24T18:55:26,288 INFO [M:0;f2b92657890a:46733 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/689ba9e6574d47e4b1560dbe7802c594, entries=1, sequenceid=121, filesize=5.0 K 2024-11-24T18:55:26,289 DEBUG [M:0;f2b92657890a:46733 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3da4636edb6945d18ed3828216871560 as hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3da4636edb6945d18ed3828216871560 2024-11-24T18:55:26,294 INFO [M:0;f2b92657890a:46733 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42029/user/jenkins/test-data/1785304b-54f9-90ba-b91d-b1c10ec14b84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3da4636edb6945d18ed3828216871560, entries=1, sequenceid=121, filesize=4.9 K 2024-11-24T18:55:26,295 INFO [M:0;f2b92657890a:46733 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.59 KB/44638, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 149ms, sequenceid=121, compaction requested=false 2024-11-24T18:55:26,298 INFO [M:0;f2b92657890a:46733 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T18:55:26,299 DEBUG [M:0;f2b92657890a:46733 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732474526146Disabling compacts and flushes for region at 1732474526146Disabling writes for close at 1732474526146Obtaining lock to block concurrent updates at 1732474526146Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732474526146Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44638, getHeapSize=56256, getOffHeapSize=0, getCellsCount=140 at 1732474526147 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732474526147Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732474526147Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732474526163 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732474526163Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732474526173 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732474526190 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732474526190Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732474526210 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732474526227 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732474526227Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732474526239 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732474526255 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732474526255Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@285b504f: reopening flushed file at 1732474526268 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3aa3ad0e: reopening flushed file at 1732474526275 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3e1c7198: reopening flushed file at 1732474526282 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@32c26722: reopening flushed file at 1732474526288 (+6 ms)Finished flush of dataSize ~43.59 KB/44638, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 149ms, sequenceid=121, compaction requested=false at 1732474526295 (+7 ms)Writing region close event to WAL at 1732474526298 (+3 ms)Closed at 1732474526298 2024-11-24T18:55:26,299 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:26,299 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:26,299 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:26,299 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:26,299 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:55:26,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41449 is added to blk_1073741830_1006 (size=53035) 2024-11-24T18:55:26,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741830_1006 (size=53035) 2024-11-24T18:55:26,302 INFO [M:0;f2b92657890a:46733 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-24T18:55:26,302 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T18:55:26,302 INFO [M:0;f2b92657890a:46733 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46733 2024-11-24T18:55:26,302 INFO [M:0;f2b92657890a:46733 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T18:55:26,343 INFO [regionserver/f2b92657890a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T18:55:26,408 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T18:55:26,408 INFO [M:0;f2b92657890a:46733 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T18:55:26,408 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46733-0x1016e3286f40000, quorum=127.0.0.1:55864, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T18:55:26,411 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@65345c29{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:55:26,411 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@36f52998{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T18:55:26,411 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T18:55:26,411 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b7fc8f3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T18:55:26,412 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3e2a30ba{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/hadoop.log.dir/,STOPPED} 2024-11-24T18:55:26,413 WARN [BP-1536927535-172.17.0.2-1732474470951 heartbeating to localhost/127.0.0.1:42029 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T18:55:26,413 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T18:55:26,413 WARN [BP-1536927535-172.17.0.2-1732474470951 heartbeating to localhost/127.0.0.1:42029 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1536927535-172.17.0.2-1732474470951 (Datanode Uuid 473c356e-3ce1-4bb3-9474-a0d8871fcc79) service to localhost/127.0.0.1:42029 2024-11-24T18:55:26,413 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T18:55:26,414 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/cluster_fc72df8f-492b-1532-6ece-79f1d51753f3/data/data3/current/BP-1536927535-172.17.0.2-1732474470951 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:55:26,414 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/cluster_fc72df8f-492b-1532-6ece-79f1d51753f3/data/data4/current/BP-1536927535-172.17.0.2-1732474470951 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:55:26,414 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T18:55:26,420 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@e6bebf5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:55:26,421 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@261a9e0a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T18:55:26,421 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T18:55:26,421 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7b079ea2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T18:55:26,421 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a2ef153{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/hadoop.log.dir/,STOPPED} 2024-11-24T18:55:26,423 WARN [BP-1536927535-172.17.0.2-1732474470951 heartbeating to localhost/127.0.0.1:42029 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T18:55:26,423 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T18:55:26,423 WARN [BP-1536927535-172.17.0.2-1732474470951 heartbeating to localhost/127.0.0.1:42029 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1536927535-172.17.0.2-1732474470951 (Datanode Uuid 93b93bea-d2c1-469c-9f3c-763bcade36b9) service to localhost/127.0.0.1:42029 2024-11-24T18:55:26,423 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T18:55:26,423 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/cluster_fc72df8f-492b-1532-6ece-79f1d51753f3/data/data1/current/BP-1536927535-172.17.0.2-1732474470951 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:55:26,423 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/cluster_fc72df8f-492b-1532-6ece-79f1d51753f3/data/data2/current/BP-1536927535-172.17.0.2-1732474470951 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:55:26,424 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T18:55:26,429 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5d8d7f9b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T18:55:26,430 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2c020752{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T18:55:26,430 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T18:55:26,430 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@20e4ef1d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T18:55:26,430 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8d4c846{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/hadoop.log.dir/,STOPPED} 2024-11-24T18:55:26,435 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T18:55:26,456 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T18:55:26,464 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=207 (was 182) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42029 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42029 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:42029 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42029 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42029 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42029 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42029 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42029 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=205 (was 201) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=8236 (was 8811) 2024-11-24T18:55:26,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:55:26,472 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=207, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=205, ProcessCount=11, AvailableMemoryMB=8236 2024-11-24T18:55:26,473 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T18:55:26,473 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/hadoop.log.dir so I do NOT create it in target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb 2024-11-24T18:55:26,473 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/158c5696-4029-845c-c181-2e809f9880d1/hadoop.tmp.dir so I do NOT create it in target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb 2024-11-24T18:55:26,473 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/cluster_53af99ab-4d80-99f9-07c3-d74bd4d340bf, deleteOnExit=true 2024-11-24T18:55:26,473 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T18:55:26,473 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/test.cache.data in system properties and HBase conf 2024-11-24T18:55:26,474 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T18:55:26,474 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/hadoop.log.dir in system properties and HBase conf 2024-11-24T18:55:26,474 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T18:55:26,474 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T18:55:26,474 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T18:55:26,474 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-24T18:55:26,474 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T18:55:26,474 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T18:55:26,475 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T18:55:26,475 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T18:55:26,475 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T18:55:26,475 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T18:55:26,475 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T18:55:26,475 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T18:55:26,475 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T18:55:26,475 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/nfs.dump.dir in system properties and HBase conf 2024-11-24T18:55:26,475 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/java.io.tmpdir in system properties and HBase conf 2024-11-24T18:55:26,476 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T18:55:26,476 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T18:55:26,476 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T18:55:26,490 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T18:55:26,490 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:26,849 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:55:26,852 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T18:55:26,853 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T18:55:26,853 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T18:55:26,853 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T18:55:26,854 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:55:26,854 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3bc081d8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/hadoop.log.dir/,AVAILABLE} 2024-11-24T18:55:26,854 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4e36d39c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T18:55:26,958 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@f63b03b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/java.io.tmpdir/jetty-localhost-39985-hadoop-hdfs-3_4_1-tests_jar-_-any-11634093950570670367/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T18:55:26,958 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@622d58de{HTTP/1.1, (http/1.1)}{localhost:39985} 2024-11-24T18:55:26,958 INFO [Time-limited test {}] server.Server(415): Started @256310ms 2024-11-24T18:55:26,971 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T18:55:27,251 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:55:27,254 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T18:55:27,254 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T18:55:27,254 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T18:55:27,254 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T18:55:27,255 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77b370f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/hadoop.log.dir/,AVAILABLE} 2024-11-24T18:55:27,255 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@767f877d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T18:55:27,357 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@25dcc129{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/java.io.tmpdir/jetty-localhost-35127-hadoop-hdfs-3_4_1-tests_jar-_-any-9443354979963320253/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:55:27,358 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1b95b0ea{HTTP/1.1, (http/1.1)}{localhost:35127} 2024-11-24T18:55:27,358 INFO [Time-limited test {}] server.Server(415): Started @256710ms 2024-11-24T18:55:27,359 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T18:55:27,389 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:55:27,391 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T18:55:27,392 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T18:55:27,392 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T18:55:27,392 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T18:55:27,392 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7d4c2da4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/hadoop.log.dir/,AVAILABLE} 2024-11-24T18:55:27,393 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@29765213{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T18:55:27,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-11-24T18:55:27,491 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:27,494 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5c3bfe4f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/java.io.tmpdir/jetty-localhost-38559-hadoop-hdfs-3_4_1-tests_jar-_-any-16215635890135737553/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:55:27,494 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5dea9c62{HTTP/1.1, (http/1.1)}{localhost:38559} 2024-11-24T18:55:27,494 INFO [Time-limited test {}] server.Server(415): Started @256846ms 2024-11-24T18:55:27,495 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
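The recurring Close-WAL-Writer-0 warnings above all have the same shape: RecoverLeaseFSUtils probes whether the old WAL file is closed via a reflective call, and the call fails with an InvocationTargetException whose cause is "java.io.IOException: Filesystem closed" -- which suggests the DFSClient used for the previous test's WALs (the hdfs://localhost:37713 paths) was already shut down during teardown while this close-writer retry was still pending. As a rough sketch only (not the HBase source; the class and helper name below are illustrative, and the reflection is assumed to exist because isFileClosed(Path) is a DistributedFileSystem method rather than part of the base FileSystem API), the probe behaves roughly like this:

```java
// Illustrative sketch of a reflective isFileClosed probe; not RecoverLeaseFSUtils itself.
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class IsFileClosedProbe {
  /** Returns true if the NameNode reports the WAL as closed; false on any failure. */
  static boolean isFileClosed(FileSystem fs, Path wal) {
    try {
      // isFileClosed(Path) is looked up reflectively on the concrete FileSystem class.
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, wal);
    } catch (InvocationTargetException e) {
      // The real failure is the wrapped cause -- here an IOException "Filesystem closed"
      // thrown by DFSClient.checkOpen -- which is exactly what the "Failed invocation"
      // WARN entries above print before retrying.
      return false;
    } catch (ReflectiveOperationException e) {
      return false; // isFileClosed not available on this FileSystem implementation
    }
  }
}
```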
2024-11-24T18:55:28,327 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T18:55:28,327 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T18:55:28,328 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-24T18:55:28,328 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T18:55:28,471 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:55:28,492 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:28,580 WARN [Thread-1966 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/cluster_53af99ab-4d80-99f9-07c3-d74bd4d340bf/data/data1/current/BP-729962942-172.17.0.2-1732474526503/current, will proceed with Du for space computation calculation, 2024-11-24T18:55:28,580 WARN [Thread-1967 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/cluster_53af99ab-4d80-99f9-07c3-d74bd4d340bf/data/data2/current/BP-729962942-172.17.0.2-1732474526503/current, will proceed with Du for space computation calculation, 2024-11-24T18:55:28,598 WARN [Thread-1930 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T18:55:28,600 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x328fef5620c89700 with lease ID 0x3f568d27a85cb16: Processing first storage report for DS-30da9416-b737-465e-b650-3b2848412caf from datanode DatanodeRegistration(127.0.0.1:44655, datanodeUuid=872c0dfe-3149-46fd-b8b4-0b7c9bae5684, infoPort=32941, infoSecurePort=0, ipcPort=45383, storageInfo=lv=-57;cid=testClusterID;nsid=1034807687;c=1732474526503) 2024-11-24T18:55:28,600 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x328fef5620c89700 with lease ID 0x3f568d27a85cb16: from storage DS-30da9416-b737-465e-b650-3b2848412caf node DatanodeRegistration(127.0.0.1:44655, datanodeUuid=872c0dfe-3149-46fd-b8b4-0b7c9bae5684, infoPort=32941, infoSecurePort=0, ipcPort=45383, storageInfo=lv=-57;cid=testClusterID;nsid=1034807687;c=1732474526503), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:55:28,600 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x328fef5620c89700 with lease ID 0x3f568d27a85cb16: Processing first storage report for DS-abfca7da-b2a6-4705-9645-dc4062238f26 from datanode DatanodeRegistration(127.0.0.1:44655, datanodeUuid=872c0dfe-3149-46fd-b8b4-0b7c9bae5684, infoPort=32941, infoSecurePort=0, ipcPort=45383, storageInfo=lv=-57;cid=testClusterID;nsid=1034807687;c=1732474526503) 2024-11-24T18:55:28,600 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x328fef5620c89700 with lease ID 0x3f568d27a85cb16: from storage DS-abfca7da-b2a6-4705-9645-dc4062238f26 node DatanodeRegistration(127.0.0.1:44655, datanodeUuid=872c0dfe-3149-46fd-b8b4-0b7c9bae5684, infoPort=32941, infoSecurePort=0, ipcPort=45383, storageInfo=lv=-57;cid=testClusterID;nsid=1034807687;c=1732474526503), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:55:28,738 WARN [Thread-1978 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/cluster_53af99ab-4d80-99f9-07c3-d74bd4d340bf/data/data4/current/BP-729962942-172.17.0.2-1732474526503/current, will proceed with Du for space computation calculation, 2024-11-24T18:55:28,738 WARN [Thread-1977 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/cluster_53af99ab-4d80-99f9-07c3-d74bd4d340bf/data/data3/current/BP-729962942-172.17.0.2-1732474526503/current, will proceed with Du for space computation calculation, 2024-11-24T18:55:28,757 WARN [Thread-1953 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T18:55:28,759 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x77810bd38b93ccf5 with lease ID 0x3f568d27a85cb17: Processing first storage report for DS-314c0f6b-2e20-433c-93e4-a974f9e96106 from datanode DatanodeRegistration(127.0.0.1:46341, datanodeUuid=9a2e26e3-8f4c-4eb2-8bb8-052bd469fa20, infoPort=39541, infoSecurePort=0, ipcPort=42923, storageInfo=lv=-57;cid=testClusterID;nsid=1034807687;c=1732474526503) 2024-11-24T18:55:28,759 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x77810bd38b93ccf5 with lease ID 0x3f568d27a85cb17: from storage DS-314c0f6b-2e20-433c-93e4-a974f9e96106 node DatanodeRegistration(127.0.0.1:46341, datanodeUuid=9a2e26e3-8f4c-4eb2-8bb8-052bd469fa20, infoPort=39541, infoSecurePort=0, ipcPort=42923, storageInfo=lv=-57;cid=testClusterID;nsid=1034807687;c=1732474526503), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:55:28,759 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x77810bd38b93ccf5 with lease ID 0x3f568d27a85cb17: Processing first storage report for DS-53ccb6ca-9848-401e-bd9a-d06f54132173 from datanode DatanodeRegistration(127.0.0.1:46341, datanodeUuid=9a2e26e3-8f4c-4eb2-8bb8-052bd469fa20, infoPort=39541, infoSecurePort=0, ipcPort=42923, storageInfo=lv=-57;cid=testClusterID;nsid=1034807687;c=1732474526503) 2024-11-24T18:55:28,759 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x77810bd38b93ccf5 with lease ID 0x3f568d27a85cb17: from storage DS-53ccb6ca-9848-401e-bd9a-d06f54132173 node DatanodeRegistration(127.0.0.1:46341, datanodeUuid=9a2e26e3-8f4c-4eb2-8bb8-052bd469fa20, infoPort=39541, infoSecurePort=0, ipcPort=42923, storageInfo=lv=-57;cid=testClusterID;nsid=1034807687;c=1732474526503), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:55:28,826 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb 2024-11-24T18:55:28,829 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/cluster_53af99ab-4d80-99f9-07c3-d74bd4d340bf/zookeeper_0, clientPort=57443, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/cluster_53af99ab-4d80-99f9-07c3-d74bd4d340bf/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/cluster_53af99ab-4d80-99f9-07c3-d74bd4d340bf/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T18:55:28,830 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57443 2024-11-24T18:55:28,830 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:55:28,832 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:55:28,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741825_1001 (size=7) 2024-11-24T18:55:28,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741825_1001 (size=7) 2024-11-24T18:55:28,842 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35 with version=8 2024-11-24T18:55:28,842 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/hbase-staging 2024-11-24T18:55:28,845 INFO [Time-limited test {}] client.ConnectionUtils(128): master/f2b92657890a:0 server-side Connection retries=45 2024-11-24T18:55:28,845 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:55:28,845 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T18:55:28,845 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T18:55:28,845 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:55:28,845 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T18:55:28,845 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T18:55:28,845 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T18:55:28,846 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39497 2024-11-24T18:55:28,849 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39497 connecting to ZooKeeper ensemble=127.0.0.1:57443 2024-11-24T18:55:28,907 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:394970x0, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T18:55:28,908 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39497-0x1016e335fe80000 connected 2024-11-24T18:55:28,992 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:55:28,994 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:55:28,996 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T18:55:28,996 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35, hbase.cluster.distributed=false 2024-11-24T18:55:28,998 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T18:55:28,999 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39497 2024-11-24T18:55:28,999 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39497 2024-11-24T18:55:28,999 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39497 2024-11-24T18:55:28,999 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39497 2024-11-24T18:55:29,000 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39497 2024-11-24T18:55:29,015 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/f2b92657890a:0 server-side Connection retries=45 2024-11-24T18:55:29,015 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:55:29,016 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T18:55:29,016 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T18:55:29,016 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:55:29,016 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T18:55:29,016 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T18:55:29,016 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T18:55:29,016 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38787 2024-11-24T18:55:29,018 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38787 connecting to ZooKeeper ensemble=127.0.0.1:57443 2024-11-24T18:55:29,018 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:55:29,020 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:55:29,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:387870x0, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T18:55:29,034 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38787-0x1016e335fe80001 connected 2024-11-24T18:55:29,034 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38787-0x1016e335fe80001, quorum=127.0.0.1:57443, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T18:55:29,035 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T18:55:29,035 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T18:55:29,036 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38787-0x1016e335fe80001, quorum=127.0.0.1:57443, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T18:55:29,037 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38787-0x1016e335fe80001, quorum=127.0.0.1:57443, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T18:55:29,037 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38787 2024-11-24T18:55:29,037 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38787 2024-11-24T18:55:29,038 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38787 2024-11-24T18:55:29,038 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38787 2024-11-24T18:55:29,038 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38787 2024-11-24T18:55:29,053 DEBUG [M:0;f2b92657890a:39497 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;f2b92657890a:39497 2024-11-24T18:55:29,053 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/f2b92657890a,39497,1732474528844 2024-11-24T18:55:29,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38787-0x1016e335fe80001, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T18:55:29,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T18:55:29,066 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/f2b92657890a,39497,1732474528844 2024-11-24T18:55:29,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38787-0x1016e335fe80001, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T18:55:29,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:55:29,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38787-0x1016e335fe80001, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:55:29,076 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T18:55:29,077 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/f2b92657890a,39497,1732474528844 from backup master directory 2024-11-24T18:55:29,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/f2b92657890a,39497,1732474528844 2024-11-24T18:55:29,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38787-0x1016e335fe80001, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T18:55:29,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T18:55:29,087 WARN [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-24T18:55:29,087 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=f2b92657890a,39497,1732474528844 2024-11-24T18:55:29,091 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/hbase.id] with ID: aa239d52-b99b-42ce-bcb7-159a372862e9 2024-11-24T18:55:29,091 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/.tmp/hbase.id 2024-11-24T18:55:29,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741826_1002 (size=42) 2024-11-24T18:55:29,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741826_1002 (size=42) 2024-11-24T18:55:29,097 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/.tmp/hbase.id]:[hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/hbase.id] 2024-11-24T18:55:29,109 INFO [master/f2b92657890a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:55:29,110 INFO [master/f2b92657890a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T18:55:29,111 INFO [master/f2b92657890a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-24T18:55:29,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38787-0x1016e335fe80001, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:55:29,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:55:29,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741827_1003 (size=196) 2024-11-24T18:55:29,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741827_1003 (size=196) 2024-11-24T18:55:29,125 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T18:55:29,126 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T18:55:29,126 INFO [master/f2b92657890a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T18:55:29,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741828_1004 (size=1189) 2024-11-24T18:55:29,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741828_1004 (size=1189) 2024-11-24T18:55:29,135 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/MasterData/data/master/store 2024-11-24T18:55:29,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741829_1005 (size=34) 2024-11-24T18:55:29,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741829_1005 (size=34) 2024-11-24T18:55:29,142 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:55:29,142 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T18:55:29,142 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:55:29,142 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:55:29,142 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T18:55:29,142 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:55:29,142 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T18:55:29,142 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732474529142Disabling compacts and flushes for region at 1732474529142Disabling writes for close at 1732474529142Writing region close event to WAL at 1732474529142Closed at 1732474529142 2024-11-24T18:55:29,143 WARN [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/MasterData/data/master/store/.initializing 2024-11-24T18:55:29,143 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/MasterData/WALs/f2b92657890a,39497,1732474528844 2024-11-24T18:55:29,145 INFO [master/f2b92657890a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f2b92657890a%2C39497%2C1732474528844, suffix=, logDir=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/MasterData/WALs/f2b92657890a,39497,1732474528844, archiveDir=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/MasterData/oldWALs, maxLogs=10 2024-11-24T18:55:29,146 INFO [master/f2b92657890a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C39497%2C1732474528844.1732474529145 2024-11-24T18:55:29,150 INFO [master/f2b92657890a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/MasterData/WALs/f2b92657890a,39497,1732474528844/f2b92657890a%2C39497%2C1732474528844.1732474529145 2024-11-24T18:55:29,151 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39541:39541),(127.0.0.1/127.0.0.1:32941:32941)] 2024-11-24T18:55:29,152 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T18:55:29,152 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:55:29,152 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:55:29,172 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:55:29,174 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:55:29,175 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T18:55:29,175 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:55:29,175 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:55:29,175 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:55:29,176 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T18:55:29,176 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:55:29,177 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T18:55:29,177 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:55:29,178 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T18:55:29,178 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:55:29,178 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T18:55:29,179 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:55:29,180 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T18:55:29,180 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:55:29,180 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T18:55:29,180 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:55:29,181 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:55:29,181 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:55:29,182 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:55:29,182 DEBUG [master/f2b92657890a:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:55:29,183 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T18:55:29,184 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:55:29,186 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T18:55:29,186 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=697758, jitterRate=-0.11275598406791687}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T18:55:29,187 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732474529172Initializing all the Stores at 1732474529173 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474529173Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474529173Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474529174 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474529174Cleaning up temporary data from old regions at 1732474529182 (+8 ms)Region opened successfully at 1732474529187 (+5 ms) 2024-11-24T18:55:29,187 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T18:55:29,190 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d8355cd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f2b92657890a/172.17.0.2:0 2024-11-24T18:55:29,191 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T18:55:29,191 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T18:55:29,191 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T18:55:29,191 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T18:55:29,192 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T18:55:29,192 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T18:55:29,192 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T18:55:29,194 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T18:55:29,195 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T18:55:29,202 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T18:55:29,203 INFO [master/f2b92657890a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T18:55:29,204 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T18:55:29,213 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T18:55:29,213 INFO [master/f2b92657890a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T18:55:29,214 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T18:55:29,223 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T18:55:29,224 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T18:55:29,234 DEBUG 
[master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T18:55:29,236 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T18:55:29,244 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T18:55:29,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38787-0x1016e335fe80001, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T18:55:29,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T18:55:29,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38787-0x1016e335fe80001, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:55:29,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:55:29,255 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=f2b92657890a,39497,1732474528844, sessionid=0x1016e335fe80000, setting cluster-up flag (Was=false) 2024-11-24T18:55:29,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38787-0x1016e335fe80001, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:55:29,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:55:29,307 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T18:55:29,309 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f2b92657890a,39497,1732474528844 2024-11-24T18:55:29,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:55:29,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38787-0x1016e335fe80001, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:55:29,360 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T18:55:29,361 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f2b92657890a,39497,1732474528844 2024-11-24T18:55:29,362 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T18:55:29,364 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T18:55:29,364 INFO [master/f2b92657890a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T18:55:29,364 INFO [master/f2b92657890a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T18:55:29,365 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: f2b92657890a,39497,1732474528844 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T18:55:29,366 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/f2b92657890a:0, corePoolSize=5, maxPoolSize=5 2024-11-24T18:55:29,366 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/f2b92657890a:0, corePoolSize=5, maxPoolSize=5 2024-11-24T18:55:29,366 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/f2b92657890a:0, corePoolSize=5, maxPoolSize=5 2024-11-24T18:55:29,366 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/f2b92657890a:0, corePoolSize=5, maxPoolSize=5 2024-11-24T18:55:29,366 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/f2b92657890a:0, corePoolSize=10, maxPoolSize=10 2024-11-24T18:55:29,366 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:55:29,366 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/f2b92657890a:0, corePoolSize=2, maxPoolSize=2 2024-11-24T18:55:29,366 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/f2b92657890a:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T18:55:29,367 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732474559367 2024-11-24T18:55:29,368 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T18:55:29,368 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T18:55:29,368 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T18:55:29,368 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T18:55:29,368 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T18:55:29,368 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T18:55:29,368 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T18:55:29,369 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T18:55:29,369 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T18:55:29,369 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T18:55:29,369 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T18:55:29,369 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T18:55:29,370 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T18:55:29,370 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T18:55:29,370 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.large.0-1732474529370,5,FailOnTimeoutGroup] 2024-11-24T18:55:29,370 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:55:29,370 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.small.0-1732474529370,5,FailOnTimeoutGroup] 2024-11-24T18:55:29,370 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-24T18:55:29,370 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T18:55:29,370 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T18:55:29,370 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T18:55:29,370 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-24T18:55:29,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741831_1007 (size=1321) 2024-11-24T18:55:29,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741831_1007 (size=1321) 2024-11-24T18:55:29,377 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T18:55:29,377 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35 2024-11-24T18:55:29,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741832_1008 (size=32) 2024-11-24T18:55:29,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741832_1008 (size=32) 2024-11-24T18:55:29,384 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:55:29,386 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T18:55:29,387 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T18:55:29,387 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:55:29,388 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:55:29,388 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T18:55:29,389 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T18:55:29,389 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:55:29,390 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:55:29,390 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T18:55:29,392 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T18:55:29,392 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:55:29,392 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:55:29,392 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T18:55:29,394 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T18:55:29,394 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:55:29,394 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:55:29,394 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T18:55:29,395 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/hbase/meta/1588230740 2024-11-24T18:55:29,395 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/hbase/meta/1588230740 2024-11-24T18:55:29,397 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T18:55:29,397 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T18:55:29,398 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-24T18:55:29,399 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T18:55:29,401 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T18:55:29,402 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=876754, jitterRate=0.11485061049461365}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T18:55:29,402 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732474529385Initializing all the Stores at 1732474529385Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474529385Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474529386 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474529386Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474529386Cleaning up temporary data from old regions at 1732474529397 (+11 ms)Region opened successfully at 1732474529402 (+5 ms) 2024-11-24T18:55:29,403 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T18:55:29,403 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T18:55:29,403 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T18:55:29,403 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T18:55:29,403 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T18:55:29,403 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T18:55:29,403 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732474529403Disabling compacts and flushes for region at 1732474529403Disabling writes for close at 1732474529403Writing region close 
event to WAL at 1732474529403Closed at 1732474529403 2024-11-24T18:55:29,405 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T18:55:29,405 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T18:55:29,405 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T18:55:29,406 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T18:55:29,407 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T18:55:29,440 INFO [RS:0;f2b92657890a:38787 {}] regionserver.HRegionServer(746): ClusterId : aa239d52-b99b-42ce-bcb7-159a372862e9 2024-11-24T18:55:29,441 DEBUG [RS:0;f2b92657890a:38787 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T18:55:29,445 DEBUG [RS:0;f2b92657890a:38787 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T18:55:29,445 DEBUG [RS:0;f2b92657890a:38787 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T18:55:29,456 DEBUG [RS:0;f2b92657890a:38787 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T18:55:29,456 DEBUG [RS:0;f2b92657890a:38787 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@323cc043, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f2b92657890a/172.17.0.2:0 2024-11-24T18:55:29,469 DEBUG [RS:0;f2b92657890a:38787 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;f2b92657890a:38787 2024-11-24T18:55:29,469 INFO [RS:0;f2b92657890a:38787 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T18:55:29,469 INFO [RS:0;f2b92657890a:38787 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T18:55:29,469 DEBUG [RS:0;f2b92657890a:38787 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-24T18:55:29,469 INFO [RS:0;f2b92657890a:38787 {}] regionserver.HRegionServer(2659): reportForDuty to master=f2b92657890a,39497,1732474528844 with port=38787, startcode=1732474529015 2024-11-24T18:55:29,470 DEBUG [RS:0;f2b92657890a:38787 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T18:55:29,471 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49403, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T18:55:29,471 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:55:29,472 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39497 {}] master.ServerManager(363): Checking decommissioned status of RegionServer f2b92657890a,38787,1732474529015 2024-11-24T18:55:29,472 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39497 {}] master.ServerManager(517): Registering regionserver=f2b92657890a,38787,1732474529015 2024-11-24T18:55:29,474 DEBUG [RS:0;f2b92657890a:38787 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35 2024-11-24T18:55:29,474 DEBUG [RS:0;f2b92657890a:38787 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33185 2024-11-24T18:55:29,474 DEBUG [RS:0;f2b92657890a:38787 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T18:55:29,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T18:55:29,487 DEBUG [RS:0;f2b92657890a:38787 {}] zookeeper.ZKUtil(111): regionserver:38787-0x1016e335fe80001, quorum=127.0.0.1:57443, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/f2b92657890a,38787,1732474529015 2024-11-24T18:55:29,487 WARN [RS:0;f2b92657890a:38787 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T18:55:29,487 INFO [RS:0;f2b92657890a:38787 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T18:55:29,487 DEBUG [RS:0;f2b92657890a:38787 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/WALs/f2b92657890a,38787,1732474529015 2024-11-24T18:55:29,487 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [f2b92657890a,38787,1732474529015] 2024-11-24T18:55:29,490 INFO [RS:0;f2b92657890a:38787 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T18:55:29,492 INFO [RS:0;f2b92657890a:38787 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T18:55:29,492 INFO [RS:0;f2b92657890a:38787 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T18:55:29,492 INFO [RS:0;f2b92657890a:38787 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T18:55:29,493 INFO [RS:0;f2b92657890a:38787 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T18:55:29,492 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:29,493 INFO [RS:0;f2b92657890a:38787 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T18:55:29,493 INFO [RS:0;f2b92657890a:38787 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-24T18:55:29,494 DEBUG [RS:0;f2b92657890a:38787 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:55:29,494 DEBUG [RS:0;f2b92657890a:38787 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:55:29,494 DEBUG [RS:0;f2b92657890a:38787 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:55:29,494 DEBUG [RS:0;f2b92657890a:38787 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:55:29,494 DEBUG [RS:0;f2b92657890a:38787 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:55:29,494 DEBUG [RS:0;f2b92657890a:38787 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/f2b92657890a:0, corePoolSize=2, maxPoolSize=2 2024-11-24T18:55:29,494 DEBUG [RS:0;f2b92657890a:38787 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:55:29,494 DEBUG [RS:0;f2b92657890a:38787 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:55:29,494 DEBUG [RS:0;f2b92657890a:38787 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:55:29,494 DEBUG [RS:0;f2b92657890a:38787 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:55:29,494 DEBUG [RS:0;f2b92657890a:38787 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:55:29,494 DEBUG [RS:0;f2b92657890a:38787 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:55:29,494 DEBUG [RS:0;f2b92657890a:38787 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/f2b92657890a:0, corePoolSize=3, maxPoolSize=3 2024-11-24T18:55:29,494 DEBUG [RS:0;f2b92657890a:38787 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0, corePoolSize=3, maxPoolSize=3 2024-11-24T18:55:29,495 INFO [RS:0;f2b92657890a:38787 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T18:55:29,495 INFO [RS:0;f2b92657890a:38787 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T18:55:29,495 INFO [RS:0;f2b92657890a:38787 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T18:55:29,495 INFO [RS:0;f2b92657890a:38787 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-24T18:55:29,495 INFO [RS:0;f2b92657890a:38787 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T18:55:29,495 INFO [RS:0;f2b92657890a:38787 {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,38787,1732474529015-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T18:55:29,513 INFO [RS:0;f2b92657890a:38787 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T18:55:29,513 INFO [RS:0;f2b92657890a:38787 {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,38787,1732474529015-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T18:55:29,513 INFO [RS:0;f2b92657890a:38787 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:55:29,513 INFO [RS:0;f2b92657890a:38787 {}] regionserver.Replication(171): f2b92657890a,38787,1732474529015 started 2024-11-24T18:55:29,531 INFO [RS:0;f2b92657890a:38787 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:55:29,531 INFO [RS:0;f2b92657890a:38787 {}] regionserver.HRegionServer(1482): Serving as f2b92657890a,38787,1732474529015, RpcServer on f2b92657890a/172.17.0.2:38787, sessionid=0x1016e335fe80001 2024-11-24T18:55:29,531 DEBUG [RS:0;f2b92657890a:38787 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T18:55:29,531 DEBUG [RS:0;f2b92657890a:38787 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager f2b92657890a,38787,1732474529015 2024-11-24T18:55:29,531 DEBUG [RS:0;f2b92657890a:38787 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f2b92657890a,38787,1732474529015' 2024-11-24T18:55:29,531 DEBUG [RS:0;f2b92657890a:38787 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T18:55:29,532 DEBUG [RS:0;f2b92657890a:38787 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T18:55:29,532 DEBUG [RS:0;f2b92657890a:38787 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T18:55:29,532 DEBUG [RS:0;f2b92657890a:38787 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T18:55:29,532 DEBUG [RS:0;f2b92657890a:38787 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager f2b92657890a,38787,1732474529015 2024-11-24T18:55:29,532 DEBUG [RS:0;f2b92657890a:38787 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f2b92657890a,38787,1732474529015' 2024-11-24T18:55:29,532 DEBUG [RS:0;f2b92657890a:38787 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T18:55:29,532 DEBUG [RS:0;f2b92657890a:38787 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T18:55:29,533 DEBUG [RS:0;f2b92657890a:38787 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T18:55:29,533 INFO [RS:0;f2b92657890a:38787 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T18:55:29,533 INFO [RS:0;f2b92657890a:38787 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-24T18:55:29,557 WARN [f2b92657890a:39497 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-24T18:55:29,634 INFO [RS:0;f2b92657890a:38787 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f2b92657890a%2C38787%2C1732474529015, suffix=, logDir=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/WALs/f2b92657890a,38787,1732474529015, archiveDir=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/oldWALs, maxLogs=32 2024-11-24T18:55:29,635 INFO [RS:0;f2b92657890a:38787 {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C38787%2C1732474529015.1732474529635 2024-11-24T18:55:29,640 INFO [RS:0;f2b92657890a:38787 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/WALs/f2b92657890a,38787,1732474529015/f2b92657890a%2C38787%2C1732474529015.1732474529635 2024-11-24T18:55:29,641 DEBUG [RS:0;f2b92657890a:38787 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39541:39541),(127.0.0.1/127.0.0.1:32941:32941)] 2024-11-24T18:55:29,808 DEBUG [f2b92657890a:39497 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T18:55:29,808 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=f2b92657890a,38787,1732474529015 2024-11-24T18:55:29,810 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f2b92657890a,38787,1732474529015, state=OPENING 2024-11-24T18:55:29,834 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T18:55:29,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:55:29,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38787-0x1016e335fe80001, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:55:29,845 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T18:55:29,845 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T18:55:29,845 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T18:55:29,845 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=f2b92657890a,38787,1732474529015}] 2024-11-24T18:55:29,999 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T18:55:30,002 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59167, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T18:55:30,006 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T18:55:30,006 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T18:55:30,008 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f2b92657890a%2C38787%2C1732474529015.meta, suffix=.meta, logDir=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/WALs/f2b92657890a,38787,1732474529015, archiveDir=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/oldWALs, maxLogs=32 2024-11-24T18:55:30,009 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C38787%2C1732474529015.meta.1732474530008.meta 2024-11-24T18:55:30,015 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/WALs/f2b92657890a,38787,1732474529015/f2b92657890a%2C38787%2C1732474529015.meta.1732474530008.meta 2024-11-24T18:55:30,016 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39541:39541),(127.0.0.1/127.0.0.1:32941:32941)] 2024-11-24T18:55:30,017 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T18:55:30,017 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T18:55:30,017 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T18:55:30,017 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-24T18:55:30,017 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T18:55:30,017 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:55:30,017 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T18:55:30,017 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T18:55:30,019 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T18:55:30,020 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T18:55:30,020 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:55:30,020 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:55:30,021 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T18:55:30,021 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T18:55:30,022 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:55:30,022 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:55:30,022 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T18:55:30,023 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T18:55:30,023 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:55:30,024 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:55:30,024 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T18:55:30,025 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T18:55:30,025 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:55:30,025 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-24T18:55:30,025 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T18:55:30,026 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/hbase/meta/1588230740 2024-11-24T18:55:30,027 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/hbase/meta/1588230740 2024-11-24T18:55:30,028 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T18:55:30,028 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T18:55:30,029 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T18:55:30,030 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T18:55:30,031 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=802577, jitterRate=0.02052988111972809}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T18:55:30,031 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T18:55:30,031 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732474530018Writing region info on filesystem at 1732474530018Initializing all the Stores at 1732474530019 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474530019Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474530019Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474530019Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474530019Cleaning up temporary data from old regions at 1732474530028 (+9 ms)Running coprocessor post-open hooks at 1732474530031 (+3 ms)Region opened successfully at 1732474530031 2024-11-24T18:55:30,032 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732474529998 2024-11-24T18:55:30,035 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T18:55:30,035 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T18:55:30,035 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=f2b92657890a,38787,1732474529015 2024-11-24T18:55:30,036 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f2b92657890a,38787,1732474529015, state=OPEN 2024-11-24T18:55:30,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38787-0x1016e335fe80001, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T18:55:30,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T18:55:30,351 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=f2b92657890a,38787,1732474529015 2024-11-24T18:55:30,351 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T18:55:30,351 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T18:55:30,354 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T18:55:30,354 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=f2b92657890a,38787,1732474529015 in 506 msec 2024-11-24T18:55:30,357 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T18:55:30,357 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 949 msec 2024-11-24T18:55:30,358 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T18:55:30,358 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T18:55:30,359 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T18:55:30,359 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f2b92657890a,38787,1732474529015, seqNum=-1] 2024-11-24T18:55:30,359 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T18:55:30,361 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53441, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T18:55:30,366 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0020 sec 2024-11-24T18:55:30,366 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732474530366, completionTime=-1 2024-11-24T18:55:30,366 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T18:55:30,366 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T18:55:30,369 INFO [master/f2b92657890a:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T18:55:30,369 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732474590369 2024-11-24T18:55:30,369 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732474650369 2024-11-24T18:55:30,369 INFO [master/f2b92657890a:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-24T18:55:30,369 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,39497,1732474528844-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T18:55:30,369 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,39497,1732474528844-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:55:30,369 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,39497,1732474528844-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:55:30,369 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-f2b92657890a:39497, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T18:55:30,369 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T18:55:30,371 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T18:55:30,372 DEBUG [master/f2b92657890a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T18:55:30,374 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.287sec 2024-11-24T18:55:30,375 INFO [master/f2b92657890a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T18:55:30,375 INFO [master/f2b92657890a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T18:55:30,375 INFO [master/f2b92657890a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T18:55:30,375 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T18:55:30,375 INFO [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T18:55:30,375 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,39497,1732474528844-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T18:55:30,375 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,39497,1732474528844-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T18:55:30,377 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T18:55:30,377 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T18:55:30,377 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,39497,1732474528844-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T18:55:30,441 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d09ac1e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T18:55:30,441 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request f2b92657890a,39497,-1 for getting cluster id 2024-11-24T18:55:30,441 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T18:55:30,443 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'aa239d52-b99b-42ce-bcb7-159a372862e9' 2024-11-24T18:55:30,443 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T18:55:30,443 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "aa239d52-b99b-42ce-bcb7-159a372862e9" 2024-11-24T18:55:30,443 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@704d01d5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T18:55:30,443 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f2b92657890a,39497,-1] 2024-11-24T18:55:30,443 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T18:55:30,444 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:55:30,445 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55796, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T18:55:30,445 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2983d230, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T18:55:30,446 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T18:55:30,447 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f2b92657890a,38787,1732474529015, seqNum=-1] 2024-11-24T18:55:30,447 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T18:55:30,448 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40934, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T18:55:30,450 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=f2b92657890a,39497,1732474528844 2024-11-24T18:55:30,450 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:55:30,453 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T18:55:30,453 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T18:55:30,454 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is f2b92657890a,39497,1732474528844 2024-11-24T18:55:30,454 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6a2b2f97 2024-11-24T18:55:30,454 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T18:55:30,455 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55806, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T18:55:30,456 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39497 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-24T18:55:30,456 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39497 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-24T18:55:30,456 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39497 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T18:55:30,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39497 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-24T18:55:30,459 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T18:55:30,459 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:55:30,459 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39497 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-24T18:55:30,460 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T18:55:30,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39497 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T18:55:30,467 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741835_1011 (size=381) 2024-11-24T18:55:30,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741835_1011 (size=381) 2024-11-24T18:55:30,469 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 39086109d05e3e13c2ae0a9ad08f12fd, NAME => 'TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35 2024-11-24T18:55:30,472 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:55:30,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741836_1012 (size=64) 2024-11-24T18:55:30,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741836_1012 (size=64) 2024-11-24T18:55:30,476 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:55:30,476 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 39086109d05e3e13c2ae0a9ad08f12fd, disabling compactions & flushes 2024-11-24T18:55:30,476 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd. 2024-11-24T18:55:30,476 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd. 2024-11-24T18:55:30,476 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd. after waiting 0 ms 2024-11-24T18:55:30,476 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd. 2024-11-24T18:55:30,476 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd. 2024-11-24T18:55:30,476 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 39086109d05e3e13c2ae0a9ad08f12fd: Waiting for close lock at 1732474530476Disabling compacts and flushes for region at 1732474530476Disabling writes for close at 1732474530476Writing region close event to WAL at 1732474530476Closed at 1732474530476 2024-11-24T18:55:30,477 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T18:55:30,478 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732474530477"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732474530477"}]},"ts":"1732474530477"} 2024-11-24T18:55:30,480 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-24T18:55:30,482 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T18:55:30,482 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732474530482"}]},"ts":"1732474530482"} 2024-11-24T18:55:30,485 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-24T18:55:30,485 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=39086109d05e3e13c2ae0a9ad08f12fd, ASSIGN}] 2024-11-24T18:55:30,486 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=39086109d05e3e13c2ae0a9ad08f12fd, ASSIGN 2024-11-24T18:55:30,487 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=39086109d05e3e13c2ae0a9ad08f12fd, ASSIGN; state=OFFLINE, location=f2b92657890a,38787,1732474529015; forceNewPlan=false, retain=false 2024-11-24T18:55:30,493 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:30,638 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=39086109d05e3e13c2ae0a9ad08f12fd, regionState=OPENING, regionLocation=f2b92657890a,38787,1732474529015 2024-11-24T18:55:30,640 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=39086109d05e3e13c2ae0a9ad08f12fd, ASSIGN because future has completed 2024-11-24T18:55:30,641 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 39086109d05e3e13c2ae0a9ad08f12fd, server=f2b92657890a,38787,1732474529015}] 2024-11-24T18:55:30,688 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:30,689 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:30,689 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:30,689 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:30,689 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:30,689 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:30,690 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:30,690 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:30,714 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:30,714 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:30,714 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:30,714 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:30,714 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:30,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:30,719 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:30,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:30,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:30,722 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:30,797 INFO [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd. 2024-11-24T18:55:30,798 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 39086109d05e3e13c2ae0a9ad08f12fd, NAME => 'TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd.', STARTKEY => '', ENDKEY => ''} 2024-11-24T18:55:30,798 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:55:30,798 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:55:30,798 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:55:30,798 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:55:30,799 INFO [StoreOpener-39086109d05e3e13c2ae0a9ad08f12fd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:55:30,801 INFO [StoreOpener-39086109d05e3e13c2ae0a9ad08f12fd-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 39086109d05e3e13c2ae0a9ad08f12fd columnFamilyName info 2024-11-24T18:55:30,801 DEBUG [StoreOpener-39086109d05e3e13c2ae0a9ad08f12fd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:55:30,801 INFO [StoreOpener-39086109d05e3e13c2ae0a9ad08f12fd-1 {}] regionserver.HStore(327): Store=39086109d05e3e13c2ae0a9ad08f12fd/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T18:55:30,802 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:55:30,802 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:55:30,803 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:55:30,803 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:55:30,803 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:55:30,805 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:55:30,807 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T18:55:30,807 INFO [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 39086109d05e3e13c2ae0a9ad08f12fd; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=707421, jitterRate=-0.10046862065792084}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T18:55:30,807 DEBUG 
[RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:55:30,808 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 39086109d05e3e13c2ae0a9ad08f12fd: Running coprocessor pre-open hook at 1732474530798Writing region info on filesystem at 1732474530798Initializing all the Stores at 1732474530799 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474530799Cleaning up temporary data from old regions at 1732474530803 (+4 ms)Running coprocessor post-open hooks at 1732474530807 (+4 ms)Region opened successfully at 1732474530808 (+1 ms) 2024-11-24T18:55:30,809 INFO [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd., pid=6, masterSystemTime=1732474530793 2024-11-24T18:55:30,811 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd. 2024-11-24T18:55:30,812 INFO [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd. 
2024-11-24T18:55:30,812 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=39086109d05e3e13c2ae0a9ad08f12fd, regionState=OPEN, openSeqNum=2, regionLocation=f2b92657890a,38787,1732474529015 2024-11-24T18:55:30,814 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 39086109d05e3e13c2ae0a9ad08f12fd, server=f2b92657890a,38787,1732474529015 because future has completed 2024-11-24T18:55:30,818 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T18:55:30,818 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 39086109d05e3e13c2ae0a9ad08f12fd, server=f2b92657890a,38787,1732474529015 in 175 msec 2024-11-24T18:55:30,824 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T18:55:30,824 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732474530824"}]},"ts":"1732474530824"} 2024-11-24T18:55:30,825 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T18:55:30,825 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=39086109d05e3e13c2ae0a9ad08f12fd, ASSIGN in 333 msec 2024-11-24T18:55:30,827 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-24T18:55:30,829 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T18:55:30,831 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 373 msec 2024-11-24T18:55:31,227 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T18:55:31,228 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:31,228 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:31,228 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:31,228 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:31,229 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:31,229 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:31,230 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:31,230 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:31,256 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:31,256 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:31,256 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:31,257 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:31,257 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:31,257 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:31,263 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:31,263 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:31,264 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:31,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:31,473 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:31,494 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:32,473 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:32,494 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:33,474 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:33,495 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:34,475 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:34,496 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:35,475 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:35,490 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T18:55:35,491 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-24T18:55:35,496 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:36,301 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T18:55:36,302 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:36,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:36,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:36,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:36,304 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:36,304 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:36,305 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:36,305 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:36,333 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:36,333 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:36,333 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:36,334 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:36,334 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:36,334 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:36,339 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:36,339 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:36,339 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:36,342 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:36,476 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:36,497 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:55:37,477 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:37,498 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:38,327 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T18:55:38,327 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-24T18:55:38,328 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T18:55:38,328 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-24T18:55:38,328 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-24T18:55:38,328 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-24T18:55:38,329 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-24T18:55:38,329 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-24T18:55:38,477 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:38,498 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:39,478 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:39,499 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:40,479 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:40,500 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:55:40,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39497 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T18:55:40,541 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-24T18:55:40,541 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-24T18:55:40,544 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-24T18:55:40,544 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd. 2024-11-24T18:55:40,547 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd., hostname=f2b92657890a,38787,1732474529015, seqNum=2] 2024-11-24T18:55:40,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38787 {}] regionserver.HRegion(8855): Flush requested on 39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:55:40,561 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 39086109d05e3e13c2ae0a9ad08f12fd 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T18:55:40,582 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp/info/ff6b0684856745c18cf87744569afe5f is 1080, key is row0001/info:/1732474540548/Put/seqid=0 2024-11-24T18:55:40,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741837_1013 (size=12509) 2024-11-24T18:55:40,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741837_1013 (size=12509) 2024-11-24T18:55:40,593 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp/info/ff6b0684856745c18cf87744569afe5f 2024-11-24T18:55:40,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp/info/ff6b0684856745c18cf87744569afe5f as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/ff6b0684856745c18cf87744569afe5f 2024-11-24T18:55:40,607 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/ff6b0684856745c18cf87744569afe5f, entries=7, sequenceid=11, filesize=12.2 K 
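
Aside: the repeated `Close-WAL-Writer-0` WARN entries earlier in this stretch come from the lease-recovery helper polling `isFileClosed` on the WAL file through reflection while the test's DFS client has already been shut down, so every probe surfaces as an `InvocationTargetException` wrapping "Filesystem closed" and is retried about once per second. The following is a minimal, self-contained sketch of that reflective polling pattern only; the `ClosedFs` type, method names, and path are illustrative stand-ins, not the actual RecoverLeaseFSUtils or DFSClient code.

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveIsFileClosedPoll {
    /** Stand-in for a filesystem whose isFileClosed(path) is probed reflectively. */
    public static class ClosedFs {
        public boolean isFileClosed(String path) throws java.io.IOException {
            // Mimics a client that has already been shut down ("Filesystem closed").
            throw new java.io.IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        ClosedFs fs = new ClosedFs();
        // Look the method up once, then invoke it reflectively on each retry.
        Method isFileClosed = ClosedFs.class.getMethod("isFileClosed", String.class);
        String wal = "hdfs://localhost:37713/.../example-wal"; // placeholder path

        for (int attempt = 0; attempt < 3; attempt++) {
            try {
                if ((Boolean) isFileClosed.invoke(fs, wal)) {
                    System.out.println("Lease recovered, file closed: " + wal);
                    return;
                }
            } catch (InvocationTargetException e) {
                // Same shape as the log: the reflective wrapper, caused by the IOException.
                System.out.println("WARN Failed invocation for " + wal + ": " + e.getCause());
            }
            Thread.sleep(1000); // retry roughly once per second, matching the WARN spacing above
        }
    }
}
```

The ~1 s cadence of the WARN lines (18:55:37, 18:55:38, 18:55:39, ...) matches this retry-and-log loop; the warnings are expected noise once the mini-cluster's filesystem has been torn down.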
2024-11-24T18:55:40,608 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=22.07 KB/22596 for 39086109d05e3e13c2ae0a9ad08f12fd in 47ms, sequenceid=11, compaction requested=false 2024-11-24T18:55:40,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 39086109d05e3e13c2ae0a9ad08f12fd: 2024-11-24T18:55:40,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38787 {}] regionserver.HRegion(8855): Flush requested on 39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:55:40,608 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 39086109d05e3e13c2ae0a9ad08f12fd 1/1 column families, dataSize=23.12 KB heapSize=25 KB 2024-11-24T18:55:40,613 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp/info/93a991dfef324dfdb3a3e28c5f6cc554 is 1080, key is row0008/info:/1732474540563/Put/seqid=0 2024-11-24T18:55:40,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741838_1014 (size=28684) 2024-11-24T18:55:40,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741838_1014 (size=28684) 2024-11-24T18:55:40,619 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=23.12 KB at sequenceid=36 (bloomFilter=true), to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp/info/93a991dfef324dfdb3a3e28c5f6cc554 2024-11-24T18:55:40,624 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp/info/93a991dfef324dfdb3a3e28c5f6cc554 as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/93a991dfef324dfdb3a3e28c5f6cc554 2024-11-24T18:55:40,630 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/93a991dfef324dfdb3a3e28c5f6cc554, entries=22, sequenceid=36, filesize=28.0 K 2024-11-24T18:55:40,631 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.12 KB/23672, heapSize ~24.98 KB/25584, currentSize=3.15 KB/3228 for 39086109d05e3e13c2ae0a9ad08f12fd in 23ms, sequenceid=36, compaction requested=false 2024-11-24T18:55:40,631 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 39086109d05e3e13c2ae0a9ad08f12fd: 2024-11-24T18:55:40,631 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.2 K, sizeToCheck=16.0 K 2024-11-24T18:55:40,631 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T18:55:40,631 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/93a991dfef324dfdb3a3e28c5f6cc554 because midkey is the same as first or last row 2024-11-24T18:55:41,480 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:41,500 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:42,481 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:42,501 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:55:42,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38787 {}] regionserver.HRegion(8855): Flush requested on 39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:55:42,627 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 39086109d05e3e13c2ae0a9ad08f12fd 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T18:55:42,633 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp/info/b2d0f28065e74d2db9f1d2dbe3760d50 is 1080, key is row0030/info:/1732474540610/Put/seqid=0 2024-11-24T18:55:42,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741839_1015 (size=12509) 2024-11-24T18:55:42,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741839_1015 (size=12509) 2024-11-24T18:55:42,638 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=46 (bloomFilter=true), to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp/info/b2d0f28065e74d2db9f1d2dbe3760d50 2024-11-24T18:55:42,645 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp/info/b2d0f28065e74d2db9f1d2dbe3760d50 as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/b2d0f28065e74d2db9f1d2dbe3760d50 2024-11-24T18:55:42,652 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/b2d0f28065e74d2db9f1d2dbe3760d50, entries=7, sequenceid=46, filesize=12.2 K 2024-11-24T18:55:42,652 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 39086109d05e3e13c2ae0a9ad08f12fd in 25ms, sequenceid=46, compaction requested=true 2024-11-24T18:55:42,653 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 39086109d05e3e13c2ae0a9ad08f12fd: 2024-11-24T18:55:42,653 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=52.4 K, sizeToCheck=16.0 K 2024-11-24T18:55:42,653 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T18:55:42,653 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/93a991dfef324dfdb3a3e28c5f6cc554 because midkey is the same as first or last row 2024-11-24T18:55:42,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 39086109d05e3e13c2ae0a9ad08f12fd:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-24T18:55:42,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T18:55:42,653 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T18:55:42,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38787 {}] regionserver.HRegion(8855): Flush requested on 39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:55:42,654 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 39086109d05e3e13c2ae0a9ad08f12fd 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-24T18:55:42,654 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 53702 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T18:55:42,655 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HStore(1541): 39086109d05e3e13c2ae0a9ad08f12fd/info is initiating minor compaction (all files) 2024-11-24T18:55:42,655 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 39086109d05e3e13c2ae0a9ad08f12fd/info in TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd. 2024-11-24T18:55:42,655 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/ff6b0684856745c18cf87744569afe5f, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/93a991dfef324dfdb3a3e28c5f6cc554, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/b2d0f28065e74d2db9f1d2dbe3760d50] into tmpdir=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp, totalSize=52.4 K 2024-11-24T18:55:42,655 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.Compactor(225): Compacting ff6b0684856745c18cf87744569afe5f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732474540548 2024-11-24T18:55:42,656 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.Compactor(225): Compacting 93a991dfef324dfdb3a3e28c5f6cc554, keycount=22, bloomtype=ROW, size=28.0 K, encoding=NONE, compression=NONE, seqNum=36, earliestPutTs=1732474540563 2024-11-24T18:55:42,656 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.Compactor(225): Compacting b2d0f28065e74d2db9f1d2dbe3760d50, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=46, earliestPutTs=1732474540610 2024-11-24T18:55:42,659 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp/info/eded3005968c4e29b64ec40be2c837a5 is 1080, key is row0037/info:/1732474542629/Put/seqid=0 
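
Aside: the DEBUG lines around here show the flush-and-compact cycle: each flush adds a new HFile to the `info` store, the region is then checked against the split policy (size check passes, but the split is refused because the midkey coincides with the first or last row), and once three eligible files exist the compaction policy selects them for a minor compaction. The sketch below mirrors just the two size/midkey checks visible in those lines; class names, method names, and thresholds are illustrative, not the actual HBase split-policy classes.

```java
import java.util.List;

public class StoreSizeChecks {
    /** Size-based split check, mirroring the "Should split because region size is big enough" lines. */
    static boolean shouldSplit(List<Long> storeFileSizes, long sizeToCheckBytes) {
        long sum = storeFileSizes.stream().mapToLong(Long::longValue).sum();
        System.out.printf("Should split: sumSize=%.1f K, sizeToCheck=%.1f K%n",
                sum / 1024.0, sizeToCheckBytes / 1024.0);
        return sum > sizeToCheckBytes;
    }

    /** Mirrors the "cannot split ... because midkey is the same as first or last row" guard. */
    static boolean canUseMidkey(String firstRow, String midKey, String lastRow) {
        return !midKey.equals(firstRow) && !midKey.equals(lastRow);
    }

    public static void main(String[] args) {
        // File sizes roughly matching the log (12.2 K + 28.0 K + 12.2 K) against a 16 K check.
        List<Long> files = List.of(12509L, 28684L, 12509L);
        boolean sizeSaysSplit = shouldSplit(files, 16 * 1024L);
        // With only a few dozen rows written so far, the candidate midkey can equal an end row,
        // which is why the split is refused even though the size check passes.
        boolean midkeyUsable = canUseMidkey("row0001", "row0001", "row0036");
        System.out.println("size check=" + sizeSaysSplit + ", midkey usable=" + midkeyUsable);
    }
}
```

This is why the log keeps printing "Should split" immediately followed by "cannot split ... because midkey is the same as first or last row": the size threshold is long exceeded, but no usable split point exists yet, so the region keeps flushing and compacting in place.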
2024-11-24T18:55:42,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741840_1016 (size=16817) 2024-11-24T18:55:42,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741840_1016 (size=16817) 2024-11-24T18:55:42,665 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp/info/eded3005968c4e29b64ec40be2c837a5 2024-11-24T18:55:42,676 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp/info/eded3005968c4e29b64ec40be2c837a5 as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/eded3005968c4e29b64ec40be2c837a5 2024-11-24T18:55:42,679 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 39086109d05e3e13c2ae0a9ad08f12fd#info#compaction#59 average throughput is 18.47 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T18:55:42,680 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp/info/b3c5de618ab344e39c487dd5496ac8e5 is 1080, key is row0001/info:/1732474540548/Put/seqid=0 2024-11-24T18:55:42,683 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/eded3005968c4e29b64ec40be2c837a5, entries=11, sequenceid=60, filesize=16.4 K 2024-11-24T18:55:42,684 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=12.61 KB/12912 for 39086109d05e3e13c2ae0a9ad08f12fd in 30ms, sequenceid=60, compaction requested=false 2024-11-24T18:55:42,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 39086109d05e3e13c2ae0a9ad08f12fd: 2024-11-24T18:55:42,684 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=68.9 K, sizeToCheck=16.0 K 2024-11-24T18:55:42,684 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T18:55:42,684 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/93a991dfef324dfdb3a3e28c5f6cc554 because midkey is the same as first or last row 2024-11-24T18:55:42,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38787 {}] regionserver.HRegion(8855): Flush requested on 39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:55:42,686 INFO 
[MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 39086109d05e3e13c2ae0a9ad08f12fd 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-24T18:55:42,694 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp/info/f759b59f93b7489d86fb42eb2097f8e9 is 1080, key is row0048/info:/1732474542656/Put/seqid=0 2024-11-24T18:55:42,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741841_1017 (size=43901) 2024-11-24T18:55:42,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741841_1017 (size=43901) 2024-11-24T18:55:42,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741842_1018 (size=18987) 2024-11-24T18:55:42,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741842_1018 (size=18987) 2024-11-24T18:55:42,700 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp/info/f759b59f93b7489d86fb42eb2097f8e9 2024-11-24T18:55:42,705 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp/info/b3c5de618ab344e39c487dd5496ac8e5 as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/b3c5de618ab344e39c487dd5496ac8e5 2024-11-24T18:55:42,706 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp/info/f759b59f93b7489d86fb42eb2097f8e9 as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/f759b59f93b7489d86fb42eb2097f8e9 2024-11-24T18:55:42,711 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/f759b59f93b7489d86fb42eb2097f8e9, entries=13, sequenceid=76, filesize=18.5 K 2024-11-24T18:55:42,712 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=4.20 KB/4304 for 39086109d05e3e13c2ae0a9ad08f12fd in 26ms, sequenceid=76, compaction requested=false 2024-11-24T18:55:42,712 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 39086109d05e3e13c2ae0a9ad08f12fd/info of 39086109d05e3e13c2ae0a9ad08f12fd into b3c5de618ab344e39c487dd5496ac8e5(size=42.9 K), total size for store is 77.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T18:55:42,712 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 39086109d05e3e13c2ae0a9ad08f12fd: 2024-11-24T18:55:42,712 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 39086109d05e3e13c2ae0a9ad08f12fd: 2024-11-24T18:55:42,712 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd., storeName=39086109d05e3e13c2ae0a9ad08f12fd/info, priority=13, startTime=1732474542653; duration=0sec 2024-11-24T18:55:42,712 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=77.8 K, sizeToCheck=16.0 K 2024-11-24T18:55:42,712 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=77.8 K, sizeToCheck=16.0 K 2024-11-24T18:55:42,712 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T18:55:42,712 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T18:55:42,712 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/b3c5de618ab344e39c487dd5496ac8e5 because midkey is the same as first or last row 2024-11-24T18:55:42,712 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/b3c5de618ab344e39c487dd5496ac8e5 because midkey is the same as first or last row 2024-11-24T18:55:42,713 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=77.8 K, sizeToCheck=16.0 K 2024-11-24T18:55:42,713 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T18:55:42,713 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/b3c5de618ab344e39c487dd5496ac8e5 because midkey is the same as first or last row 2024-11-24T18:55:42,713 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=77.8 K, sizeToCheck=16.0 K 2024-11-24T18:55:42,713 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T18:55:42,713 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/b3c5de618ab344e39c487dd5496ac8e5 because midkey is the 
same as first or last row 2024-11-24T18:55:42,713 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T18:55:42,713 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 39086109d05e3e13c2ae0a9ad08f12fd:info 2024-11-24T18:55:43,481 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:43,502 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:44,482 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:44,502 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:55:44,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38787 {}] regionserver.HRegion(8855): Flush requested on 39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:55:44,702 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 39086109d05e3e13c2ae0a9ad08f12fd 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T18:55:44,707 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp/info/ff57c94629e34748914afa66b2f96bf1 is 1080, key is row0061/info:/1732474542687/Put/seqid=0 2024-11-24T18:55:44,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741843_1019 (size=12509) 2024-11-24T18:55:44,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741843_1019 (size=12509) 2024-11-24T18:55:44,713 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp/info/ff57c94629e34748914afa66b2f96bf1 2024-11-24T18:55:44,719 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp/info/ff57c94629e34748914afa66b2f96bf1 as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/ff57c94629e34748914afa66b2f96bf1 2024-11-24T18:55:44,725 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/ff57c94629e34748914afa66b2f96bf1, entries=7, sequenceid=87, filesize=12.2 K 2024-11-24T18:55:44,726 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 39086109d05e3e13c2ae0a9ad08f12fd in 25ms, sequenceid=87, compaction requested=true 2024-11-24T18:55:44,726 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 39086109d05e3e13c2ae0a9ad08f12fd: 2024-11-24T18:55:44,727 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=90.1 K, sizeToCheck=16.0 K 2024-11-24T18:55:44,727 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T18:55:44,727 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/b3c5de618ab344e39c487dd5496ac8e5 because midkey is the same as first or last row 2024-11-24T18:55:44,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 39086109d05e3e13c2ae0a9ad08f12fd:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-24T18:55:44,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T18:55:44,727 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-24T18:55:44,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38787 {}] regionserver.HRegion(8855): Flush requested on 39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:55:44,728 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 39086109d05e3e13c2ae0a9ad08f12fd 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-24T18:55:44,728 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 92214 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-24T18:55:44,728 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HStore(1541): 39086109d05e3e13c2ae0a9ad08f12fd/info is initiating minor compaction (all files) 2024-11-24T18:55:44,729 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 39086109d05e3e13c2ae0a9ad08f12fd/info in TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd. 2024-11-24T18:55:44,729 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/b3c5de618ab344e39c487dd5496ac8e5, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/eded3005968c4e29b64ec40be2c837a5, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/f759b59f93b7489d86fb42eb2097f8e9, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/ff57c94629e34748914afa66b2f96bf1] into tmpdir=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp, totalSize=90.1 K 2024-11-24T18:55:44,729 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.Compactor(225): Compacting b3c5de618ab344e39c487dd5496ac8e5, keycount=36, bloomtype=ROW, size=42.9 K, encoding=NONE, compression=NONE, seqNum=46, earliestPutTs=1732474540548 2024-11-24T18:55:44,729 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.Compactor(225): Compacting eded3005968c4e29b64ec40be2c837a5, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1732474542629 2024-11-24T18:55:44,730 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.Compactor(225): Compacting f759b59f93b7489d86fb42eb2097f8e9, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732474542656 2024-11-24T18:55:44,730 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.Compactor(225): Compacting ff57c94629e34748914afa66b2f96bf1, keycount=7, 
bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1732474542687 2024-11-24T18:55:44,732 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp/info/834c878770fc45baa52294c92f4058e4 is 1080, key is row0068/info:/1732474544703/Put/seqid=0 2024-11-24T18:55:44,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741844_1020 (size=16817) 2024-11-24T18:55:44,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741844_1020 (size=16817) 2024-11-24T18:55:44,762 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=101 (bloomFilter=true), to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp/info/834c878770fc45baa52294c92f4058e4 2024-11-24T18:55:44,765 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 39086109d05e3e13c2ae0a9ad08f12fd#info#compaction#63 average throughput is 17.19 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T18:55:44,765 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp/info/d70f4cd3b88a497d865b6ea27a60f4ed is 1080, key is row0001/info:/1732474540548/Put/seqid=0 2024-11-24T18:55:44,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp/info/834c878770fc45baa52294c92f4058e4 as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/834c878770fc45baa52294c92f4058e4 2024-11-24T18:55:44,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741845_1021 (size=77566) 2024-11-24T18:55:44,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741845_1021 (size=77566) 2024-11-24T18:55:44,777 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/834c878770fc45baa52294c92f4058e4, entries=11, sequenceid=101, filesize=16.4 K 2024-11-24T18:55:44,778 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=18.91 KB/19368 for 39086109d05e3e13c2ae0a9ad08f12fd in 50ms, sequenceid=101, compaction requested=false 2024-11-24T18:55:44,778 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 39086109d05e3e13c2ae0a9ad08f12fd: 2024-11-24T18:55:44,778 DEBUG 
[MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=106.5 K, sizeToCheck=16.0 K 2024-11-24T18:55:44,778 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T18:55:44,778 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/b3c5de618ab344e39c487dd5496ac8e5 because midkey is the same as first or last row 2024-11-24T18:55:44,782 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp/info/d70f4cd3b88a497d865b6ea27a60f4ed as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/d70f4cd3b88a497d865b6ea27a60f4ed 2024-11-24T18:55:44,789 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in 39086109d05e3e13c2ae0a9ad08f12fd/info of 39086109d05e3e13c2ae0a9ad08f12fd into d70f4cd3b88a497d865b6ea27a60f4ed(size=75.7 K), total size for store is 92.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T18:55:44,790 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 39086109d05e3e13c2ae0a9ad08f12fd: 2024-11-24T18:55:44,790 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd., storeName=39086109d05e3e13c2ae0a9ad08f12fd/info, priority=12, startTime=1732474544727; duration=0sec 2024-11-24T18:55:44,790 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=92.2 K, sizeToCheck=16.0 K 2024-11-24T18:55:44,790 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T18:55:44,790 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=92.2 K, sizeToCheck=16.0 K 2024-11-24T18:55:44,790 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T18:55:44,790 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=92.2 K, sizeToCheck=16.0 K 2024-11-24T18:55:44,790 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T18:55:44,791 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T18:55:44,791 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T18:55:44,791 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 39086109d05e3e13c2ae0a9ad08f12fd:info 2024-11-24T18:55:44,792 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39497 {}] assignment.AssignmentManager(1363): Split request from f2b92657890a,38787,1732474529015, parent={ENCODED => 39086109d05e3e13c2ae0a9ad08f12fd, NAME => 'TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-24T18:55:44,798 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39497 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=f2b92657890a,38787,1732474529015 2024-11-24T18:55:44,803 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39497 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=39086109d05e3e13c2ae0a9ad08f12fd, daughterA=206a079f6509e9ab6eaf2e5852d1e5e8, daughterB=4767344858182c51ec061a20b9011b4b 2024-11-24T18:55:44,804 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=39086109d05e3e13c2ae0a9ad08f12fd, daughterA=206a079f6509e9ab6eaf2e5852d1e5e8, daughterB=4767344858182c51ec061a20b9011b4b 2024-11-24T18:55:44,804 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=39086109d05e3e13c2ae0a9ad08f12fd, daughterA=206a079f6509e9ab6eaf2e5852d1e5e8, daughterB=4767344858182c51ec061a20b9011b4b 2024-11-24T18:55:44,804 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=39086109d05e3e13c2ae0a9ad08f12fd, daughterA=206a079f6509e9ab6eaf2e5852d1e5e8, daughterB=4767344858182c51ec061a20b9011b4b 2024-11-24T18:55:44,812 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=39086109d05e3e13c2ae0a9ad08f12fd, UNASSIGN}] 2024-11-24T18:55:44,813 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=39086109d05e3e13c2ae0a9ad08f12fd, UNASSIGN 2024-11-24T18:55:44,814 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=39086109d05e3e13c2ae0a9ad08f12fd, regionState=CLOSING, regionLocation=f2b92657890a,38787,1732474529015 2024-11-24T18:55:44,816 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=39086109d05e3e13c2ae0a9ad08f12fd, UNASSIGN because future has completed 2024-11-24T18:55:44,817 DEBUG [PEWorker-5 {}] 
assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-24T18:55:44,817 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 39086109d05e3e13c2ae0a9ad08f12fd, server=f2b92657890a,38787,1732474529015}] 2024-11-24T18:55:44,975 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:55:44,975 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-24T18:55:44,976 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 39086109d05e3e13c2ae0a9ad08f12fd, disabling compactions & flushes 2024-11-24T18:55:44,976 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd. 2024-11-24T18:55:44,976 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd. 2024-11-24T18:55:44,976 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd. after waiting 0 ms 2024-11-24T18:55:44,976 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd. 
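The repeated "util.RecoverLeaseFSUtils(258): Failed invocation" warnings earlier in this run come from HBase probing an old WAL through reflection: recoverFileLease() asks the NameNode to recover the lease, then polls DistributedFileSystem.isFileClosed() until the file is reported closed. The nested "java.io.IOException: Filesystem closed" means the cached DFSClient behind that filesystem had already been shut down, so every probe fails the same way. Below is a minimal sketch of the same recover-then-poll pattern written directly against the HDFS client API; the NameNode address and path are illustrative assumptions, not the WAL paths from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:8020"); // hypothetical NameNode
    Path wal = new Path("/hbase/WALs/example-wal");    // illustrative path

    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // Ask the NameNode to start lease recovery; true means the file is already closed.
      boolean closed = dfs.recoverLease(wal);
      // Poll isFileClosed(), which is what RecoverLeaseFSUtils invokes reflectively.
      // If the underlying DFSClient has been closed, this call throws
      // IOException("Filesystem closed") -- the failure mode in the WARN entries above.
      for (int i = 0; i < 60 && !closed; i++) {
        Thread.sleep(1000);
        closed = dfs.isFileClosed(wal);
      }
      System.out.println("lease recovered and file closed: " + closed);
    }
  }
}
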
2024-11-24T18:55:44,976 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 39086109d05e3e13c2ae0a9ad08f12fd 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-11-24T18:55:44,981 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp/info/727c23d250fd4509b96cfd72a766ccac is 1080, key is row0079/info:/1732474544729/Put/seqid=0 2024-11-24T18:55:44,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741846_1022 (size=24376) 2024-11-24T18:55:44,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741846_1022 (size=24376) 2024-11-24T18:55:44,988 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp/info/727c23d250fd4509b96cfd72a766ccac 2024-11-24T18:55:44,995 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/.tmp/info/727c23d250fd4509b96cfd72a766ccac as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/727c23d250fd4509b96cfd72a766ccac 2024-11-24T18:55:45,001 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/727c23d250fd4509b96cfd72a766ccac, entries=18, sequenceid=123, filesize=23.8 K 2024-11-24T18:55:45,002 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=0 B/0 for 39086109d05e3e13c2ae0a9ad08f12fd in 25ms, sequenceid=123, compaction requested=true 2024-11-24T18:55:45,003 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/ff6b0684856745c18cf87744569afe5f, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/93a991dfef324dfdb3a3e28c5f6cc554, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/b3c5de618ab344e39c487dd5496ac8e5, 
hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/b2d0f28065e74d2db9f1d2dbe3760d50, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/eded3005968c4e29b64ec40be2c837a5, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/f759b59f93b7489d86fb42eb2097f8e9, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/ff57c94629e34748914afa66b2f96bf1] to archive 2024-11-24T18:55:45,004 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-24T18:55:45,006 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/ff6b0684856745c18cf87744569afe5f to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/archive/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/ff6b0684856745c18cf87744569afe5f 2024-11-24T18:55:45,007 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/93a991dfef324dfdb3a3e28c5f6cc554 to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/archive/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/93a991dfef324dfdb3a3e28c5f6cc554 2024-11-24T18:55:45,008 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/b3c5de618ab344e39c487dd5496ac8e5 to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/archive/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/b3c5de618ab344e39c487dd5496ac8e5 2024-11-24T18:55:45,010 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/b2d0f28065e74d2db9f1d2dbe3760d50 to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/archive/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/b2d0f28065e74d2db9f1d2dbe3760d50 2024-11-24T18:55:45,011 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/eded3005968c4e29b64ec40be2c837a5 to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/archive/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/eded3005968c4e29b64ec40be2c837a5 2024-11-24T18:55:45,012 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/f759b59f93b7489d86fb42eb2097f8e9 to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/archive/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/f759b59f93b7489d86fb42eb2097f8e9 2024-11-24T18:55:45,013 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/ff57c94629e34748914afa66b2f96bf1 to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/archive/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/ff57c94629e34748914afa66b2f96bf1 2024-11-24T18:55:45,019 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=1 2024-11-24T18:55:45,020 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd. 2024-11-24T18:55:45,020 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 39086109d05e3e13c2ae0a9ad08f12fd: Waiting for close lock at 1732474544976Running coprocessor pre-close hooks at 1732474544976Disabling compacts and flushes for region at 1732474544976Disabling writes for close at 1732474544976Obtaining lock to block concurrent updates at 1732474544976Preparing flush snapshotting stores in 39086109d05e3e13c2ae0a9ad08f12fd at 1732474544976Finished memstore snapshotting TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd., syncing WAL and waiting on mvcc, flushsize=dataSize=19368, getHeapSize=20976, getOffHeapSize=0, getCellsCount=18 at 1732474544976Flushing stores of TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd. 
at 1732474544977 (+1 ms)Flushing 39086109d05e3e13c2ae0a9ad08f12fd/info: creating writer at 1732474544977Flushing 39086109d05e3e13c2ae0a9ad08f12fd/info: appending metadata at 1732474544980 (+3 ms)Flushing 39086109d05e3e13c2ae0a9ad08f12fd/info: closing flushed file at 1732474544980Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f8470ed: reopening flushed file at 1732474544994 (+14 ms)Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=0 B/0 for 39086109d05e3e13c2ae0a9ad08f12fd in 25ms, sequenceid=123, compaction requested=true at 1732474545002 (+8 ms)Writing region close event to WAL at 1732474545015 (+13 ms)Running coprocessor post-close hooks at 1732474545020 (+5 ms)Closed at 1732474545020 2024-11-24T18:55:45,022 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:55:45,022 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=39086109d05e3e13c2ae0a9ad08f12fd, regionState=CLOSED 2024-11-24T18:55:45,024 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 39086109d05e3e13c2ae0a9ad08f12fd, server=f2b92657890a,38787,1732474529015 because future has completed 2024-11-24T18:55:45,028 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-24T18:55:45,028 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 39086109d05e3e13c2ae0a9ad08f12fd, server=f2b92657890a,38787,1732474529015 in 209 msec 2024-11-24T18:55:45,030 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-24T18:55:45,030 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=39086109d05e3e13c2ae0a9ad08f12fd, UNASSIGN in 217 msec 2024-11-24T18:55:45,040 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:55:45,044 INFO [PEWorker-5 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 3 storefiles, region=39086109d05e3e13c2ae0a9ad08f12fd, threads=3 2024-11-24T18:55:45,046 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/727c23d250fd4509b96cfd72a766ccac for region: 39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:55:45,046 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/834c878770fc45baa52294c92f4058e4 for region: 39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:55:45,052 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/d70f4cd3b88a497d865b6ea27a60f4ed for region: 39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:55:45,055 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/727c23d250fd4509b96cfd72a766ccac, top=true 2024-11-24T18:55:45,056 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/834c878770fc45baa52294c92f4058e4, top=true 2024-11-24T18:55:45,114 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/TestLogRolling-testLogRolling=39086109d05e3e13c2ae0a9ad08f12fd-834c878770fc45baa52294c92f4058e4 for child: 4767344858182c51ec061a20b9011b4b, parent: 39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:55:45,114 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/TestLogRolling-testLogRolling=39086109d05e3e13c2ae0a9ad08f12fd-727c23d250fd4509b96cfd72a766ccac for child: 4767344858182c51ec061a20b9011b4b, parent: 39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:55:45,115 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/834c878770fc45baa52294c92f4058e4 for region: 39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:55:45,115 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/727c23d250fd4509b96cfd72a766ccac for region: 39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:55:45,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741847_1023 (size=27) 2024-11-24T18:55:45,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741847_1023 (size=27) 2024-11-24T18:55:45,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741848_1024 (size=27) 2024-11-24T18:55:45,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741848_1024 (size=27) 2024-11-24T18:55:45,127 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/d70f4cd3b88a497d865b6ea27a60f4ed for region: 39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:55:45,129 DEBUG [PEWorker-5 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 39086109d05e3e13c2ae0a9ad08f12fd Daughter A: [hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/206a079f6509e9ab6eaf2e5852d1e5e8/info/d70f4cd3b88a497d865b6ea27a60f4ed.39086109d05e3e13c2ae0a9ad08f12fd] storefiles, Daughter B: [hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/TestLogRolling-testLogRolling=39086109d05e3e13c2ae0a9ad08f12fd-727c23d250fd4509b96cfd72a766ccac, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/TestLogRolling-testLogRolling=39086109d05e3e13c2ae0a9ad08f12fd-834c878770fc45baa52294c92f4058e4, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/d70f4cd3b88a497d865b6ea27a60f4ed.39086109d05e3e13c2ae0a9ad08f12fd] storefiles. 2024-11-24T18:55:45,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741849_1025 (size=71) 2024-11-24T18:55:45,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741849_1025 (size=71) 2024-11-24T18:55:45,140 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:55:45,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741850_1026 (size=71) 2024-11-24T18:55:45,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741850_1026 (size=71) 2024-11-24T18:55:45,154 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:55:45,165 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/206a079f6509e9ab6eaf2e5852d1e5e8/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-24T18:55:45,167 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-24T18:55:45,169 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732474545169"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1732474545169"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1732474545169"}]},"ts":"1732474545169"} 2024-11-24T18:55:45,170 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732474544798.206a079f6509e9ab6eaf2e5852d1e5e8.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732474545169"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732474545169"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732474545169"}]},"ts":"1732474545169"} 2024-11-24T18:55:45,170 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732474545169"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732474545169"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732474545169"}]},"ts":"1732474545169"} 2024-11-24T18:55:45,189 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=206a079f6509e9ab6eaf2e5852d1e5e8, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4767344858182c51ec061a20b9011b4b, ASSIGN}] 2024-11-24T18:55:45,190 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=206a079f6509e9ab6eaf2e5852d1e5e8, ASSIGN 2024-11-24T18:55:45,190 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4767344858182c51ec061a20b9011b4b, ASSIGN 2024-11-24T18:55:45,191 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=206a079f6509e9ab6eaf2e5852d1e5e8, ASSIGN; state=SPLITTING_NEW, location=f2b92657890a,38787,1732474529015; forceNewPlan=false, retain=false 2024-11-24T18:55:45,191 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4767344858182c51ec061a20b9011b4b, ASSIGN; state=SPLITTING_NEW, location=f2b92657890a,38787,1732474529015; forceNewPlan=false, retain=false 2024-11-24T18:55:45,342 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=206a079f6509e9ab6eaf2e5852d1e5e8, regionState=OPENING, regionLocation=f2b92657890a,38787,1732474529015 2024-11-24T18:55:45,342 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta 
row=4767344858182c51ec061a20b9011b4b, regionState=OPENING, regionLocation=f2b92657890a,38787,1732474529015 2024-11-24T18:55:45,344 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=206a079f6509e9ab6eaf2e5852d1e5e8, ASSIGN because future has completed 2024-11-24T18:55:45,345 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 206a079f6509e9ab6eaf2e5852d1e5e8, server=f2b92657890a,38787,1732474529015}] 2024-11-24T18:55:45,345 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4767344858182c51ec061a20b9011b4b, ASSIGN because future has completed 2024-11-24T18:55:45,346 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4767344858182c51ec061a20b9011b4b, server=f2b92657890a,38787,1732474529015}] 2024-11-24T18:55:45,483 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:45,501 INFO [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732474544798.206a079f6509e9ab6eaf2e5852d1e5e8. 2024-11-24T18:55:45,501 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 206a079f6509e9ab6eaf2e5852d1e5e8, NAME => 'TestLogRolling-testLogRolling,,1732474544798.206a079f6509e9ab6eaf2e5852d1e5e8.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-24T18:55:45,501 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 206a079f6509e9ab6eaf2e5852d1e5e8 2024-11-24T18:55:45,501 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732474544798.206a079f6509e9ab6eaf2e5852d1e5e8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:55:45,501 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 206a079f6509e9ab6eaf2e5852d1e5e8 2024-11-24T18:55:45,501 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 206a079f6509e9ab6eaf2e5852d1e5e8 2024-11-24T18:55:45,503 INFO [StoreOpener-206a079f6509e9ab6eaf2e5852d1e5e8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 206a079f6509e9ab6eaf2e5852d1e5e8 2024-11-24T18:55:45,503 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:45,504 INFO [StoreOpener-206a079f6509e9ab6eaf2e5852d1e5e8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 206a079f6509e9ab6eaf2e5852d1e5e8 columnFamilyName info 2024-11-24T18:55:45,504 DEBUG [StoreOpener-206a079f6509e9ab6eaf2e5852d1e5e8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:55:45,517 DEBUG [StoreOpener-206a079f6509e9ab6eaf2e5852d1e5e8-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/206a079f6509e9ab6eaf2e5852d1e5e8/info/d70f4cd3b88a497d865b6ea27a60f4ed.39086109d05e3e13c2ae0a9ad08f12fd->hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/d70f4cd3b88a497d865b6ea27a60f4ed-bottom 2024-11-24T18:55:45,518 INFO [StoreOpener-206a079f6509e9ab6eaf2e5852d1e5e8-1 {}] regionserver.HStore(327): Store=206a079f6509e9ab6eaf2e5852d1e5e8/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T18:55:45,518 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 206a079f6509e9ab6eaf2e5852d1e5e8 2024-11-24T18:55:45,519 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/206a079f6509e9ab6eaf2e5852d1e5e8 2024-11-24T18:55:45,520 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/206a079f6509e9ab6eaf2e5852d1e5e8 2024-11-24T18:55:45,521 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 206a079f6509e9ab6eaf2e5852d1e5e8 2024-11-24T18:55:45,521 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 206a079f6509e9ab6eaf2e5852d1e5e8 2024-11-24T18:55:45,523 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 206a079f6509e9ab6eaf2e5852d1e5e8 2024-11-24T18:55:45,523 INFO [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 206a079f6509e9ab6eaf2e5852d1e5e8; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=730545, jitterRate=-0.07106439769268036}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T18:55:45,523 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 206a079f6509e9ab6eaf2e5852d1e5e8 2024-11-24T18:55:45,524 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 206a079f6509e9ab6eaf2e5852d1e5e8: Running coprocessor pre-open hook at 1732474545502Writing region info on filesystem at 1732474545502Initializing all the Stores at 1732474545502Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474545502Cleaning up temporary data from old regions at 1732474545521 (+19 ms)Running coprocessor post-open hooks at 1732474545523 (+2 ms)Region opened successfully at 1732474545524 (+1 ms) 2024-11-24T18:55:45,525 INFO [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732474544798.206a079f6509e9ab6eaf2e5852d1e5e8., pid=12, masterSystemTime=1732474545497 2024-11-24T18:55:45,525 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 206a079f6509e9ab6eaf2e5852d1e5e8:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T18:55:45,525 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T18:55:45,525 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-24T18:55:45,526 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split 
daughter region TestLogRolling-testLogRolling,,1732474544798.206a079f6509e9ab6eaf2e5852d1e5e8. 2024-11-24T18:55:45,526 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HStore(1541): 206a079f6509e9ab6eaf2e5852d1e5e8/info is initiating minor compaction (all files) 2024-11-24T18:55:45,526 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 206a079f6509e9ab6eaf2e5852d1e5e8/info in TestLogRolling-testLogRolling,,1732474544798.206a079f6509e9ab6eaf2e5852d1e5e8. 2024-11-24T18:55:45,526 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/206a079f6509e9ab6eaf2e5852d1e5e8/info/d70f4cd3b88a497d865b6ea27a60f4ed.39086109d05e3e13c2ae0a9ad08f12fd->hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/d70f4cd3b88a497d865b6ea27a60f4ed-bottom] into tmpdir=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/206a079f6509e9ab6eaf2e5852d1e5e8/.tmp, totalSize=75.7 K 2024-11-24T18:55:45,527 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.Compactor(225): Compacting d70f4cd3b88a497d865b6ea27a60f4ed.39086109d05e3e13c2ae0a9ad08f12fd, keycount=33, bloomtype=ROW, size=75.7 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1732474540548 2024-11-24T18:55:45,527 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732474544798.206a079f6509e9ab6eaf2e5852d1e5e8. 2024-11-24T18:55:45,528 INFO [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732474544798.206a079f6509e9ab6eaf2e5852d1e5e8. 2024-11-24T18:55:45,528 INFO [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b. 
2024-11-24T18:55:45,528 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 4767344858182c51ec061a20b9011b4b, NAME => 'TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-24T18:55:45,528 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 4767344858182c51ec061a20b9011b4b 2024-11-24T18:55:45,528 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:55:45,528 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=206a079f6509e9ab6eaf2e5852d1e5e8, regionState=OPEN, openSeqNum=127, regionLocation=f2b92657890a,38787,1732474529015 2024-11-24T18:55:45,528 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 4767344858182c51ec061a20b9011b4b 2024-11-24T18:55:45,528 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 4767344858182c51ec061a20b9011b4b 2024-11-24T18:55:45,529 INFO [StoreOpener-4767344858182c51ec061a20b9011b4b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4767344858182c51ec061a20b9011b4b 2024-11-24T18:55:45,530 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38787 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-24T18:55:45,530 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-11-24T18:55:45,531 INFO [StoreOpener-4767344858182c51ec061a20b9011b4b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4767344858182c51ec061a20b9011b4b columnFamilyName info 2024-11-24T18:55:45,531 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-24T18:55:45,531 DEBUG [StoreOpener-4767344858182c51ec061a20b9011b4b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:55:45,531 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 206a079f6509e9ab6eaf2e5852d1e5e8, server=f2b92657890a,38787,1732474529015 because future has completed 2024-11-24T18:55:45,535 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-24T18:55:45,535 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 206a079f6509e9ab6eaf2e5852d1e5e8, server=f2b92657890a,38787,1732474529015 in 187 msec 2024-11-24T18:55:45,537 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=206a079f6509e9ab6eaf2e5852d1e5e8, ASSIGN in 346 msec 2024-11-24T18:55:45,540 DEBUG [StoreOpener-4767344858182c51ec061a20b9011b4b-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/TestLogRolling-testLogRolling=39086109d05e3e13c2ae0a9ad08f12fd-727c23d250fd4509b96cfd72a766ccac 2024-11-24T18:55:45,545 DEBUG [StoreOpener-4767344858182c51ec061a20b9011b4b-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/TestLogRolling-testLogRolling=39086109d05e3e13c2ae0a9ad08f12fd-834c878770fc45baa52294c92f4058e4 2024-11-24T18:55:45,550 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 206a079f6509e9ab6eaf2e5852d1e5e8#info#compaction#65 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T18:55:45,550 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/206a079f6509e9ab6eaf2e5852d1e5e8/.tmp/info/a9461b9047094cd3bb844138a24b4d7d is 1080, key is row0001/info:/1732474540548/Put/seqid=0 2024-11-24T18:55:45,551 DEBUG [StoreOpener-4767344858182c51ec061a20b9011b4b-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/d70f4cd3b88a497d865b6ea27a60f4ed.39086109d05e3e13c2ae0a9ad08f12fd->hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/d70f4cd3b88a497d865b6ea27a60f4ed-top 2024-11-24T18:55:45,551 INFO [StoreOpener-4767344858182c51ec061a20b9011b4b-1 {}] regionserver.HStore(327): Store=4767344858182c51ec061a20b9011b4b/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T18:55:45,551 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 4767344858182c51ec061a20b9011b4b 2024-11-24T18:55:45,552 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b 2024-11-24T18:55:45,553 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b 2024-11-24T18:55:45,554 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 4767344858182c51ec061a20b9011b4b 2024-11-24T18:55:45,554 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 4767344858182c51ec061a20b9011b4b 2024-11-24T18:55:45,556 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 4767344858182c51ec061a20b9011b4b 2024-11-24T18:55:45,557 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/hbase/meta/1588230740/.tmp/info/9bbac35557514c24b97f34165cb8d83f is 193, key is TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b./info:regioninfo/1732474545342/Put/seqid=0 2024-11-24T18:55:45,557 INFO [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 4767344858182c51ec061a20b9011b4b; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=772938, 
jitterRate=-0.017159223556518555}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T18:55:45,557 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4767344858182c51ec061a20b9011b4b 2024-11-24T18:55:45,557 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 4767344858182c51ec061a20b9011b4b: Running coprocessor pre-open hook at 1732474545528Writing region info on filesystem at 1732474545528Initializing all the Stores at 1732474545529 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474545529Cleaning up temporary data from old regions at 1732474545554 (+25 ms)Running coprocessor post-open hooks at 1732474545557 (+3 ms)Region opened successfully at 1732474545557 2024-11-24T18:55:45,558 INFO [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b., pid=13, masterSystemTime=1732474545497 2024-11-24T18:55:45,558 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 4767344858182c51ec061a20b9011b4b:info, priority=-2147483648, current under compaction store size is 2 2024-11-24T18:55:45,558 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-24T18:55:45,558 DEBUG [RS:0;f2b92657890a:38787-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T18:55:45,559 INFO [RS:0;f2b92657890a:38787-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b. 2024-11-24T18:55:45,560 DEBUG [RS:0;f2b92657890a:38787-longCompactions-0 {}] regionserver.HStore(1541): 4767344858182c51ec061a20b9011b4b/info is initiating minor compaction (all files) 2024-11-24T18:55:45,560 INFO [RS:0;f2b92657890a:38787-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4767344858182c51ec061a20b9011b4b/info in TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b. 
2024-11-24T18:55:45,560 INFO [RS:0;f2b92657890a:38787-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/d70f4cd3b88a497d865b6ea27a60f4ed.39086109d05e3e13c2ae0a9ad08f12fd->hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/d70f4cd3b88a497d865b6ea27a60f4ed-top, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/TestLogRolling-testLogRolling=39086109d05e3e13c2ae0a9ad08f12fd-834c878770fc45baa52294c92f4058e4, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/TestLogRolling-testLogRolling=39086109d05e3e13c2ae0a9ad08f12fd-727c23d250fd4509b96cfd72a766ccac] into tmpdir=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp, totalSize=116.0 K 2024-11-24T18:55:45,560 DEBUG [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b. 2024-11-24T18:55:45,561 DEBUG [RS:0;f2b92657890a:38787-longCompactions-0 {}] compactions.Compactor(225): Compacting d70f4cd3b88a497d865b6ea27a60f4ed.39086109d05e3e13c2ae0a9ad08f12fd, keycount=33, bloomtype=ROW, size=75.7 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1732474540548 2024-11-24T18:55:45,561 INFO [RS_OPEN_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b. 
2024-11-24T18:55:45,561 DEBUG [RS:0;f2b92657890a:38787-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=39086109d05e3e13c2ae0a9ad08f12fd-834c878770fc45baa52294c92f4058e4, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1732474544703 2024-11-24T18:55:45,561 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=4767344858182c51ec061a20b9011b4b, regionState=OPEN, openSeqNum=127, regionLocation=f2b92657890a,38787,1732474529015 2024-11-24T18:55:45,561 DEBUG [RS:0;f2b92657890a:38787-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=39086109d05e3e13c2ae0a9ad08f12fd-727c23d250fd4509b96cfd72a766ccac, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732474544729 2024-11-24T18:55:45,563 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4767344858182c51ec061a20b9011b4b, server=f2b92657890a,38787,1732474529015 because future has completed 2024-11-24T18:55:45,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741851_1027 (size=70862) 2024-11-24T18:55:45,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741851_1027 (size=70862) 2024-11-24T18:55:45,569 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-24T18:55:45,569 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 4767344858182c51ec061a20b9011b4b, server=f2b92657890a,38787,1732474529015 in 220 msec 2024-11-24T18:55:45,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741852_1028 (size=9847) 2024-11-24T18:55:45,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741852_1028 (size=9847) 2024-11-24T18:55:45,572 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/hbase/meta/1588230740/.tmp/info/9bbac35557514c24b97f34165cb8d83f 2024-11-24T18:55:45,573 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-24T18:55:45,573 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4767344858182c51ec061a20b9011b4b, ASSIGN in 380 msec 2024-11-24T18:55:45,576 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=39086109d05e3e13c2ae0a9ad08f12fd, daughterA=206a079f6509e9ab6eaf2e5852d1e5e8, daughterB=4767344858182c51ec061a20b9011b4b in 775 msec 2024-11-24T18:55:45,581 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/206a079f6509e9ab6eaf2e5852d1e5e8/.tmp/info/a9461b9047094cd3bb844138a24b4d7d as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/206a079f6509e9ab6eaf2e5852d1e5e8/info/a9461b9047094cd3bb844138a24b4d7d 2024-11-24T18:55:45,588 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 206a079f6509e9ab6eaf2e5852d1e5e8/info of 206a079f6509e9ab6eaf2e5852d1e5e8 into a9461b9047094cd3bb844138a24b4d7d(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T18:55:45,588 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 206a079f6509e9ab6eaf2e5852d1e5e8: 2024-11-24T18:55:45,588 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732474544798.206a079f6509e9ab6eaf2e5852d1e5e8., storeName=206a079f6509e9ab6eaf2e5852d1e5e8/info, priority=15, startTime=1732474545525; duration=0sec 2024-11-24T18:55:45,588 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T18:55:45,588 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 206a079f6509e9ab6eaf2e5852d1e5e8:info 2024-11-24T18:55:45,593 INFO [RS:0;f2b92657890a:38787-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4767344858182c51ec061a20b9011b4b#info#compaction#67 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T18:55:45,594 DEBUG [RS:0;f2b92657890a:38787-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/b52819689fd1401595cf36002beb852f is 1080, key is row0062/info:/1732474542689/Put/seqid=0 2024-11-24T18:55:45,596 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/hbase/meta/1588230740/.tmp/ns/c529e80337734c76a32798ca31d4b2dc is 43, key is default/ns:d/1732474530361/Put/seqid=0 2024-11-24T18:55:45,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741853_1029 (size=42984) 2024-11-24T18:55:45,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741853_1029 (size=42984) 2024-11-24T18:55:45,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741854_1030 (size=5153) 2024-11-24T18:55:45,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741854_1030 (size=5153) 2024-11-24T18:55:45,611 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/hbase/meta/1588230740/.tmp/ns/c529e80337734c76a32798ca31d4b2dc 2024-11-24T18:55:45,612 DEBUG [RS:0;f2b92657890a:38787-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/b52819689fd1401595cf36002beb852f as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/b52819689fd1401595cf36002beb852f 2024-11-24T18:55:45,619 INFO [RS:0;f2b92657890a:38787-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4767344858182c51ec061a20b9011b4b/info of 4767344858182c51ec061a20b9011b4b into b52819689fd1401595cf36002beb852f(size=42.0 K), total size for store is 42.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-24T18:55:45,619 DEBUG [RS:0;f2b92657890a:38787-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4767344858182c51ec061a20b9011b4b: 2024-11-24T18:55:45,619 INFO [RS:0;f2b92657890a:38787-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b., storeName=4767344858182c51ec061a20b9011b4b/info, priority=13, startTime=1732474545558; duration=0sec 2024-11-24T18:55:45,619 DEBUG [RS:0;f2b92657890a:38787-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T18:55:45,620 DEBUG [RS:0;f2b92657890a:38787-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4767344858182c51ec061a20b9011b4b:info 2024-11-24T18:55:45,639 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/hbase/meta/1588230740/.tmp/table/4a3af3a375184717a0170c7401e1c24a is 65, key is TestLogRolling-testLogRolling/table:state/1732474530824/Put/seqid=0 2024-11-24T18:55:45,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741855_1031 (size=5340) 2024-11-24T18:55:45,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741855_1031 (size=5340) 2024-11-24T18:55:45,644 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/hbase/meta/1588230740/.tmp/table/4a3af3a375184717a0170c7401e1c24a 2024-11-24T18:55:45,650 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/hbase/meta/1588230740/.tmp/info/9bbac35557514c24b97f34165cb8d83f as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/hbase/meta/1588230740/info/9bbac35557514c24b97f34165cb8d83f 2024-11-24T18:55:45,657 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/hbase/meta/1588230740/info/9bbac35557514c24b97f34165cb8d83f, entries=30, sequenceid=17, filesize=9.6 K 2024-11-24T18:55:45,658 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/hbase/meta/1588230740/.tmp/ns/c529e80337734c76a32798ca31d4b2dc as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/hbase/meta/1588230740/ns/c529e80337734c76a32798ca31d4b2dc 2024-11-24T18:55:45,663 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/hbase/meta/1588230740/ns/c529e80337734c76a32798ca31d4b2dc, entries=2, sequenceid=17, filesize=5.0 K 2024-11-24T18:55:45,664 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/hbase/meta/1588230740/.tmp/table/4a3af3a375184717a0170c7401e1c24a as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/hbase/meta/1588230740/table/4a3af3a375184717a0170c7401e1c24a 2024-11-24T18:55:45,670 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/hbase/meta/1588230740/table/4a3af3a375184717a0170c7401e1c24a, entries=2, sequenceid=17, filesize=5.2 K 2024-11-24T18:55:45,671 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 141ms, sequenceid=17, compaction requested=false 2024-11-24T18:55:45,671 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-24T18:55:46,483 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:55:46,503 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:46,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38787 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:40934 deadline: 1732474556773, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd. is not online on f2b92657890a,38787,1732474529015 2024-11-24T18:55:46,806 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd., hostname=f2b92657890a,38787,1732474529015, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd., hostname=f2b92657890a,38787,1732474529015, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd. 
is not online on f2b92657890a,38787,1732474529015 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T18:55:46,806 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd., hostname=f2b92657890a,38787,1732474529015, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd. is not online on f2b92657890a,38787,1732474529015 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T18:55:46,806 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1732474530456.39086109d05e3e13c2ae0a9ad08f12fd., hostname=f2b92657890a,38787,1732474529015, seqNum=2 from cache 2024-11-24T18:55:47,484 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:47,504 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:55:48,485 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:48,505 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:49,486 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:55:49,505 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:50,020 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,020 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,021 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,021 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,021 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,021 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,022 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,022 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,047 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,047 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,047 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,047 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,047 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,052 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,052 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,052 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,054 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,486 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:50,506 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:50,562 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T18:55:50,563 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,563 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,563 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,563 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,563 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,564 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,564 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,565 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,589 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,589 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,589 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,590 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,590 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,590 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,594 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,595 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,595 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:50,598 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:55:51,487 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:55:51,506 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:52,488 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:52,507 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:55:53,488 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:53,508 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:54,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:55:54,508 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:55,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:55,508 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:55:56,490 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:56,509 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:56,911 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b., hostname=f2b92657890a,38787,1732474529015, seqNum=127] 2024-11-24T18:55:56,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38787 {}] regionserver.HRegion(8855): Flush requested on 4767344858182c51ec061a20b9011b4b 2024-11-24T18:55:56,924 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4767344858182c51ec061a20b9011b4b 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T18:55:56,929 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/b9ba810b5e8a4218a4f764de0ee61e70 is 1080, key is row0097/info:/1732474556913/Put/seqid=0 2024-11-24T18:55:56,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741856_1032 (size=12516) 2024-11-24T18:55:56,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741856_1032 (size=12516) 2024-11-24T18:55:56,935 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/b9ba810b5e8a4218a4f764de0ee61e70 2024-11-24T18:55:56,941 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/b9ba810b5e8a4218a4f764de0ee61e70 as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/b9ba810b5e8a4218a4f764de0ee61e70 2024-11-24T18:55:56,951 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/b9ba810b5e8a4218a4f764de0ee61e70, entries=7, sequenceid=137, filesize=12.2 K 2024-11-24T18:55:56,952 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 4767344858182c51ec061a20b9011b4b in 28ms, sequenceid=137, compaction requested=false 2024-11-24T18:55:56,952 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4767344858182c51ec061a20b9011b4b: 2024-11-24T18:55:56,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38787 {}] regionserver.HRegion(8855): Flush requested on 4767344858182c51ec061a20b9011b4b 2024-11-24T18:55:56,953 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4767344858182c51ec061a20b9011b4b 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-24T18:55:56,957 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/d20a1ac108ac43428a4bee7f5c55b3a9 is 1080, key is row0104/info:/1732474556925/Put/seqid=0 2024-11-24T18:55:56,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741857_1033 (size=20078) 2024-11-24T18:55:56,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741857_1033 (size=20078) 2024-11-24T18:55:56,969 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/d20a1ac108ac43428a4bee7f5c55b3a9 2024-11-24T18:55:56,974 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/d20a1ac108ac43428a4bee7f5c55b3a9 as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/d20a1ac108ac43428a4bee7f5c55b3a9 2024-11-24T18:55:56,980 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/d20a1ac108ac43428a4bee7f5c55b3a9, entries=14, sequenceid=154, filesize=19.6 K 2024-11-24T18:55:56,981 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=11.56 KB/11836 for 4767344858182c51ec061a20b9011b4b in 29ms, sequenceid=154, compaction requested=true 2024-11-24T18:55:56,981 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4767344858182c51ec061a20b9011b4b: 2024-11-24T18:55:56,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4767344858182c51ec061a20b9011b4b:info, priority=-2147483648, current under compaction store size is 1 
2024-11-24T18:55:56,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T18:55:56,981 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T18:55:56,983 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 75578 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T18:55:56,983 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HStore(1541): 4767344858182c51ec061a20b9011b4b/info is initiating minor compaction (all files) 2024-11-24T18:55:56,983 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4767344858182c51ec061a20b9011b4b/info in TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b. 2024-11-24T18:55:56,983 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/b52819689fd1401595cf36002beb852f, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/b9ba810b5e8a4218a4f764de0ee61e70, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/d20a1ac108ac43428a4bee7f5c55b3a9] into tmpdir=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp, totalSize=73.8 K 2024-11-24T18:55:56,983 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.Compactor(225): Compacting b52819689fd1401595cf36002beb852f, keycount=35, bloomtype=ROW, size=42.0 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732474542689 2024-11-24T18:55:56,984 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.Compactor(225): Compacting b9ba810b5e8a4218a4f764de0ee61e70, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732474556913 2024-11-24T18:55:56,984 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.Compactor(225): Compacting d20a1ac108ac43428a4bee7f5c55b3a9, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732474556925 2024-11-24T18:55:56,997 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4767344858182c51ec061a20b9011b4b#info#compaction#72 average throughput is 28.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T18:55:56,998 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/ace5f0f8fe734b76ab9c067ce0c8846a is 1080, key is row0062/info:/1732474542689/Put/seqid=0 2024-11-24T18:55:57,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741858_1034 (size=65792) 2024-11-24T18:55:57,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741858_1034 (size=65792) 2024-11-24T18:55:57,010 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/ace5f0f8fe734b76ab9c067ce0c8846a as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/ace5f0f8fe734b76ab9c067ce0c8846a 2024-11-24T18:55:57,017 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4767344858182c51ec061a20b9011b4b/info of 4767344858182c51ec061a20b9011b4b into ace5f0f8fe734b76ab9c067ce0c8846a(size=64.3 K), total size for store is 64.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T18:55:57,017 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4767344858182c51ec061a20b9011b4b: 2024-11-24T18:55:57,017 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b., storeName=4767344858182c51ec061a20b9011b4b/info, priority=13, startTime=1732474556981; duration=0sec 2024-11-24T18:55:57,017 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T18:55:57,017 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4767344858182c51ec061a20b9011b4b:info 2024-11-24T18:55:57,491 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:57,510 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:58,491 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:58,510 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:55:58,826 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T18:55:58,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38787 {}] regionserver.HRegion(8855): Flush requested on 4767344858182c51ec061a20b9011b4b 2024-11-24T18:55:58,978 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4767344858182c51ec061a20b9011b4b 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-24T18:55:58,982 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/72dfcb0afe434fa391204141a94775b7 is 1080, key is row0118/info:/1732474556954/Put/seqid=0 2024-11-24T18:55:58,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741859_1035 (size=17906) 2024-11-24T18:55:58,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741859_1035 (size=17906) 2024-11-24T18:55:59,016 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38787 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=4767344858182c51ec061a20b9011b4b, server=f2b92657890a,38787,1732474529015 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-24T18:55:59,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38787 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:40934 deadline: 1732474569015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=4767344858182c51ec061a20b9011b4b, server=f2b92657890a,38787,1732474529015 2024-11-24T18:55:59,016 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b., hostname=f2b92657890a,38787,1732474529015, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b., hostname=f2b92657890a,38787,1732474529015, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=4767344858182c51ec061a20b9011b4b, server=f2b92657890a,38787,1732474529015 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T18:55:59,017 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b., hostname=f2b92657890a,38787,1732474529015, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=4767344858182c51ec061a20b9011b4b, server=f2b92657890a,38787,1732474529015 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T18:55:59,017 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b., hostname=f2b92657890a,38787,1732474529015, seqNum=127 because the exception is null or not the one we care about 2024-11-24T18:55:59,388 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/72dfcb0afe434fa391204141a94775b7 2024-11-24T18:55:59,399 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/72dfcb0afe434fa391204141a94775b7 as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/72dfcb0afe434fa391204141a94775b7 2024-11-24T18:55:59,405 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/72dfcb0afe434fa391204141a94775b7, entries=12, sequenceid=170, filesize=17.5 K 2024-11-24T18:55:59,406 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=17.86 KB/18292 for 4767344858182c51ec061a20b9011b4b in 428ms, sequenceid=170, compaction requested=false 2024-11-24T18:55:59,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4767344858182c51ec061a20b9011b4b: 2024-11-24T18:55:59,492 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
[... the same WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation ... java.lang.reflect.InvocationTargetException, caused by java.io.IOException: Filesystem closed, with an identical stack trace, repeats 19 more times between 2024-11-24T18:55:59,511 and 2024-11-24T18:56:08,516, roughly once per second for each of hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 and hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta ...]
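The block of WARN entries above is the WAL close path repeatedly probing, via reflection, whether the old writer's file has been closed on HDFS; every probe fails here because the test's filesystem client has already been shut down, so the underlying java.io.IOException: Filesystem closed surfaces as the logged InvocationTargetException. Below is a minimal, self-contained sketch of that kind of reflective probe, for illustration only: it is not the HBase RecoverLeaseFSUtils code, and the class name, retry loop, and limits are assumptions; only FileSystem, Path, Configuration, and DistributedFileSystem#isFileClosed are real Hadoop APIs.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedProbe {
  // Reflectively ask the filesystem whether a file is closed. Returns false when
  // the probe cannot be completed, e.g. when the underlying client has already
  // been shut down and throws java.io.IOException: Filesystem closed, which
  // reaches us wrapped in an InvocationTargetException -- the pattern in the
  // WARN entries above.
  static boolean probeIsFileClosed(FileSystem fs, Path path) {
    try {
      // isFileClosed(Path) exists on DistributedFileSystem but not on the
      // generic FileSystem API, hence the reflective lookup.
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      return false; // this filesystem type has no notion of "file closed"
    } catch (IllegalAccessException | InvocationTargetException e) {
      return false; // e.getCause() would be the "Filesystem closed" IOException here
    }
  }

  public static void main(String[] args) throws IOException {
    // Hypothetical usage: poll until the file is reported closed or we give up.
    FileSystem fs = FileSystem.get(new Configuration());
    Path wal = new Path(args[0]);
    for (int i = 0; i < 10 && !probeIsFileClosed(fs, wal); i++) {
      try {
        Thread.sleep(1000L);
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        break;
      }
    }
  }
}

Against an already-closed client such a probe can never succeed, which is consistent with the once-per-second repetition of the same stack trace in the log above.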
2024-11-24T18:56:09,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38787 {}] regionserver.HRegion(8855): Flush requested on 4767344858182c51ec061a20b9011b4b 2024-11-24T18:56:09,113 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4767344858182c51ec061a20b9011b4b 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-11-24T18:56:09,118 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/2fda97c21de648be95199d03e7f54c32 is 1080, key is row0130/info:/1732474558979/Put/seqid=0 2024-11-24T18:56:09,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741860_1036 (size=24394) 2024-11-24T18:56:09,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741860_1036 (size=24394) 2024-11-24T18:56:09,128 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=191 (bloomFilter=true), to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/2fda97c21de648be95199d03e7f54c32 2024-11-24T18:56:09,137 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38787 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=4767344858182c51ec061a20b9011b4b, server=f2b92657890a,38787,1732474529015 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-24T18:56:09,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38787 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:40934 deadline: 1732474579137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=4767344858182c51ec061a20b9011b4b, server=f2b92657890a,38787,1732474529015 2024-11-24T18:56:09,138 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b., hostname=f2b92657890a,38787,1732474529015, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b., hostname=f2b92657890a,38787,1732474529015, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=4767344858182c51ec061a20b9011b4b, server=f2b92657890a,38787,1732474529015 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T18:56:09,138 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b., hostname=f2b92657890a,38787,1732474529015, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=4767344858182c51ec061a20b9011b4b, server=f2b92657890a,38787,1732474529015 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T18:56:09,138 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b., hostname=f2b92657890a,38787,1732474529015, seqNum=127 because the exception is null or not the one we care about 2024-11-24T18:56:09,138 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/2fda97c21de648be95199d03e7f54c32 as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/2fda97c21de648be95199d03e7f54c32 2024-11-24T18:56:09,143 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/2fda97c21de648be95199d03e7f54c32, entries=18, sequenceid=191, filesize=23.8 K 2024-11-24T18:56:09,144 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=11.56 KB/11836 for 4767344858182c51ec061a20b9011b4b in 31ms, sequenceid=191, compaction requested=true 2024-11-24T18:56:09,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4767344858182c51ec061a20b9011b4b: 2024-11-24T18:56:09,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4767344858182c51ec061a20b9011b4b:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T18:56:09,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T18:56:09,144 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T18:56:09,145 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 108092 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T18:56:09,145 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HStore(1541): 4767344858182c51ec061a20b9011b4b/info is initiating minor compaction (all files) 2024-11-24T18:56:09,145 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4767344858182c51ec061a20b9011b4b/info in TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b. 
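The entries above show the flush leaving 4767344858182c51ec061a20b9011b4b/info with three store files, after which the compaction policy selects all three (totalSize=105.6 K) for a minor compaction. As a rough illustration of the size-ratio idea behind lines like "Exploring compaction algorithm has selected 3 files ... with 1 in ratio", here is a simplified, hypothetical selection routine. It is not HBase's ExploringCompactionPolicy (which also weighs file counts, off-peak ratios, and stuck-store handling); the method names, thresholds, and example sizes are assumptions made for the sketch.

import java.util.ArrayList;
import java.util.List;

public class RatioCompactionSketch {
  // Pick a contiguous window of store-file sizes in which every file is at most
  // `ratio` times the combined size of the other files in the window, preferring
  // the cheapest (smallest total) qualifying window. Returns an empty list when
  // no window of at least minFiles qualifies.
  static List<Long> selectForCompaction(List<Long> fileSizes, int minFiles, int maxFiles, double ratio) {
    List<Long> best = new ArrayList<>();
    long bestTotal = Long.MAX_VALUE;
    for (int start = 0; start < fileSizes.size(); start++) {
      int maxEnd = Math.min(fileSizes.size(), start + maxFiles);
      for (int end = start + minFiles; end <= maxEnd; end++) {
        List<Long> window = fileSizes.subList(start, end);
        long total = window.stream().mapToLong(Long::longValue).sum();
        // The ratio guard keeps one very large file from being rewritten
        // together with a handful of much smaller ones.
        boolean inRatio = window.stream().allMatch(sz -> sz <= (total - sz) * ratio);
        if (inRatio && total < bestTotal) {
          bestTotal = total;
          best = new ArrayList<>(window);
        }
      }
    }
    return best;
  }

  public static void main(String[] args) {
    // Illustrative sizes in bytes (not taken from the log above).
    List<Long> sizes = List.of(20_000L, 17_000L, 24_000L, 600_000L);
    System.out.println(selectForCompaction(sizes, 3, 10, 1.2)); // [20000, 17000, 24000]
  }
}

With a low ratio a dominant file is left out until enough similarly sized peers accumulate; the real policy layers further rules (minimum and maximum file counts, size limits, off-peak behaviour) on top of this basic check.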
2024-11-24T18:56:09,146 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/ace5f0f8fe734b76ab9c067ce0c8846a, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/72dfcb0afe434fa391204141a94775b7, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/2fda97c21de648be95199d03e7f54c32] into tmpdir=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp, totalSize=105.6 K 2024-11-24T18:56:09,146 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.Compactor(225): Compacting ace5f0f8fe734b76ab9c067ce0c8846a, keycount=56, bloomtype=ROW, size=64.3 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732474542689 2024-11-24T18:56:09,146 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.Compactor(225): Compacting 72dfcb0afe434fa391204141a94775b7, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732474556954 2024-11-24T18:56:09,147 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2fda97c21de648be95199d03e7f54c32, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1732474558979 2024-11-24T18:56:09,159 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4767344858182c51ec061a20b9011b4b#info#compaction#75 average throughput is 29.42 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T18:56:09,160 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/1f2a6ea9b78147dead3e8b40073916f2 is 1080, key is row0062/info:/1732474542689/Put/seqid=0 2024-11-24T18:56:09,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741861_1037 (size=98311) 2024-11-24T18:56:09,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741861_1037 (size=98311) 2024-11-24T18:56:09,171 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/1f2a6ea9b78147dead3e8b40073916f2 as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/1f2a6ea9b78147dead3e8b40073916f2 2024-11-24T18:56:09,178 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4767344858182c51ec061a20b9011b4b/info of 4767344858182c51ec061a20b9011b4b into 1f2a6ea9b78147dead3e8b40073916f2(size=96.0 K), total size for store is 96.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T18:56:09,178 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4767344858182c51ec061a20b9011b4b: 2024-11-24T18:56:09,178 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b., storeName=4767344858182c51ec061a20b9011b4b/info, priority=13, startTime=1732474569144; duration=0sec 2024-11-24T18:56:09,178 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T18:56:09,178 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4767344858182c51ec061a20b9011b4b:info 2024-11-24T18:56:09,498 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:09,517 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:10,378 INFO [master/f2b92657890a:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-24T18:56:10,378 INFO [master/f2b92657890a:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-24T18:56:10,499 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:10,517 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:11,499 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:11,518 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:12,500 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:12,518 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:13,500 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:13,519 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:14,501 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:14,519 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:15,017 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-24T18:56:15,501 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:15,520 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:16,502 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:16,520 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:17,503 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:17,521 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:18,503 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:18,522 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:18,929 DEBUG [master/f2b92657890a:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=3, created chunk count=9, reused chunk count=63, reuseRatio=87.50% 2024-11-24T18:56:18,929 DEBUG [master/f2b92657890a:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-24T18:56:19,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38787 {}] regionserver.HRegion(8855): Flush requested on 4767344858182c51ec061a20b9011b4b 2024-11-24T18:56:19,144 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4767344858182c51ec061a20b9011b4b 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-24T18:56:19,151 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/28db862bd5914e94ad8236c41443a4fe is 1080, key is row0148/info:/1732474569114/Put/seqid=0 2024-11-24T18:56:19,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741862_1038 (size=17906) 2024-11-24T18:56:19,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741862_1038 (size=17906) 2024-11-24T18:56:19,156 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/28db862bd5914e94ad8236c41443a4fe 2024-11-24T18:56:19,162 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/28db862bd5914e94ad8236c41443a4fe as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/28db862bd5914e94ad8236c41443a4fe 2024-11-24T18:56:19,167 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/28db862bd5914e94ad8236c41443a4fe, entries=12, sequenceid=207, filesize=17.5 K 2024-11-24T18:56:19,168 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=1.05 KB/1076 for 
4767344858182c51ec061a20b9011b4b in 24ms, sequenceid=207, compaction requested=false 2024-11-24T18:56:19,168 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4767344858182c51ec061a20b9011b4b: 2024-11-24T18:56:19,504 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:19,522 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:20,505 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:20,523 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:56:21,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38787 {}] regionserver.HRegion(8855): Flush requested on 4767344858182c51ec061a20b9011b4b 2024-11-24T18:56:21,160 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4767344858182c51ec061a20b9011b4b 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T18:56:21,165 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/c62e1f6727d34350a53c49ff4f7d0cad is 1080, key is row0160/info:/1732474579145/Put/seqid=0 2024-11-24T18:56:21,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741863_1039 (size=12516) 2024-11-24T18:56:21,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741863_1039 (size=12516) 2024-11-24T18:56:21,171 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/c62e1f6727d34350a53c49ff4f7d0cad 2024-11-24T18:56:21,178 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/c62e1f6727d34350a53c49ff4f7d0cad as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/c62e1f6727d34350a53c49ff4f7d0cad 2024-11-24T18:56:21,184 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/c62e1f6727d34350a53c49ff4f7d0cad, entries=7, sequenceid=217, filesize=12.2 K 2024-11-24T18:56:21,185 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 4767344858182c51ec061a20b9011b4b in 25ms, sequenceid=217, compaction requested=true 2024-11-24T18:56:21,185 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4767344858182c51ec061a20b9011b4b: 2024-11-24T18:56:21,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4767344858182c51ec061a20b9011b4b:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T18:56:21,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T18:56:21,185 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T18:56:21,186 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 128733 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-11-24T18:56:21,187 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HStore(1541): 4767344858182c51ec061a20b9011b4b/info is initiating minor compaction (all files) 2024-11-24T18:56:21,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38787 {}] regionserver.HRegion(8855): Flush requested on 4767344858182c51ec061a20b9011b4b 2024-11-24T18:56:21,187 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4767344858182c51ec061a20b9011b4b/info in TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b. 2024-11-24T18:56:21,187 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/1f2a6ea9b78147dead3e8b40073916f2, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/28db862bd5914e94ad8236c41443a4fe, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/c62e1f6727d34350a53c49ff4f7d0cad] into tmpdir=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp, totalSize=125.7 K 2024-11-24T18:56:21,187 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4767344858182c51ec061a20b9011b4b 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-24T18:56:21,187 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1f2a6ea9b78147dead3e8b40073916f2, keycount=86, bloomtype=ROW, size=96.0 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1732474542689 2024-11-24T18:56:21,188 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.Compactor(225): Compacting 28db862bd5914e94ad8236c41443a4fe, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1732474569114 2024-11-24T18:56:21,188 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.Compactor(225): Compacting c62e1f6727d34350a53c49ff4f7d0cad, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1732474579145 2024-11-24T18:56:21,192 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/abf0203130234a3895f51a3ae6a27fd4 is 1080, key is row0167/info:/1732474581161/Put/seqid=0 2024-11-24T18:56:21,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741864_1040 (size=16828) 2024-11-24T18:56:21,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741864_1040 (size=16828) 2024-11-24T18:56:21,199 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=231 (bloomFilter=true), 
to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/abf0203130234a3895f51a3ae6a27fd4 2024-11-24T18:56:21,206 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4767344858182c51ec061a20b9011b4b#info#compaction#79 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T18:56:21,207 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/3087356085d448e690a0ad74947b1701 is 1080, key is row0062/info:/1732474542689/Put/seqid=0 2024-11-24T18:56:21,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/abf0203130234a3895f51a3ae6a27fd4 as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/abf0203130234a3895f51a3ae6a27fd4 2024-11-24T18:56:21,217 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/abf0203130234a3895f51a3ae6a27fd4, entries=11, sequenceid=231, filesize=16.4 K 2024-11-24T18:56:21,218 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=12.61 KB/12912 for 4767344858182c51ec061a20b9011b4b in 31ms, sequenceid=231, compaction requested=false 2024-11-24T18:56:21,218 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4767344858182c51ec061a20b9011b4b: 2024-11-24T18:56:21,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38787 {}] regionserver.HRegion(8855): Flush requested on 4767344858182c51ec061a20b9011b4b 2024-11-24T18:56:21,219 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4767344858182c51ec061a20b9011b4b 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-24T18:56:21,225 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/0621032bb8844e5fba08fdf7266f99f2 is 1080, key is row0178/info:/1732474581188/Put/seqid=0 2024-11-24T18:56:21,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741865_1041 (size=118899) 2024-11-24T18:56:21,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741865_1041 (size=118899) 2024-11-24T18:56:21,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741866_1042 (size=19000) 2024-11-24T18:56:21,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46341 is added to blk_1073741866_1042 (size=19000) 2024-11-24T18:56:21,231 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/0621032bb8844e5fba08fdf7266f99f2 2024-11-24T18:56:21,233 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/3087356085d448e690a0ad74947b1701 as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/3087356085d448e690a0ad74947b1701 2024-11-24T18:56:21,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/0621032bb8844e5fba08fdf7266f99f2 as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/0621032bb8844e5fba08fdf7266f99f2 2024-11-24T18:56:21,238 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4767344858182c51ec061a20b9011b4b/info of 4767344858182c51ec061a20b9011b4b into 3087356085d448e690a0ad74947b1701(size=116.1 K), total size for store is 132.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
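The flush entries above follow a write-then-commit pattern: the new HFile is first written under the region's .tmp directory and then moved into the info store directory (the "Committing ... as ..." lines). A minimal sketch of such a commit step using the Hadoop FileSystem API follows; the class name and paths are illustrative only and are not taken from the HBase source.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch of committing a flushed HFile: rename it from the region's
// .tmp directory into the store directory, as the "Committing ... as ..." entries describe.
public class CommitFlushedHFile {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path tmp = new Path("/data/default/TestTable/region/.tmp/info/flushed-hfile"); // illustrative path
    Path dst = new Path("/data/default/TestTable/region/info/flushed-hfile");      // illustrative path
    if (!fs.rename(tmp, dst)) { // HDFS rename moves the file without copying its blocks
      throw new IOException("Failed to commit " + tmp + " as " + dst);
    }
  }
}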
2024-11-24T18:56:21,238 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4767344858182c51ec061a20b9011b4b: 2024-11-24T18:56:21,239 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b., storeName=4767344858182c51ec061a20b9011b4b/info, priority=13, startTime=1732474581185; duration=0sec 2024-11-24T18:56:21,239 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T18:56:21,239 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4767344858182c51ec061a20b9011b4b:info 2024-11-24T18:56:21,242 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/0621032bb8844e5fba08fdf7266f99f2, entries=13, sequenceid=247, filesize=18.6 K 2024-11-24T18:56:21,243 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=2.10 KB/2152 for 4767344858182c51ec061a20b9011b4b in 24ms, sequenceid=247, compaction requested=true 2024-11-24T18:56:21,243 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4767344858182c51ec061a20b9011b4b: 2024-11-24T18:56:21,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4767344858182c51ec061a20b9011b4b:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T18:56:21,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T18:56:21,243 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T18:56:21,244 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 154727 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T18:56:21,244 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HStore(1541): 4767344858182c51ec061a20b9011b4b/info is initiating minor compaction (all files) 2024-11-24T18:56:21,245 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4767344858182c51ec061a20b9011b4b/info in TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b. 
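The "selected 3 files of size 154727" figure above is the sum of the block sizes reported for the three HFiles being merged (118899 + 16828 + 19000 bytes), and the "totalSize=151.1 K" on the following entry is the same number expressed in KiB. A quick check:

public class CompactionSizeCheck {
  public static void main(String[] args) {
    long[] hfileSizes = {118899L, 16828L, 19000L}; // bytes, from the addStoredBlock entries above
    long total = 0;
    for (long size : hfileSizes) {
      total += size;
    }
    System.out.println(total);                      // 154727, matching the selection entry
    System.out.printf("%.1f K%n", total / 1024.0);  // 151.1 K, matching totalSize below
  }
}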
2024-11-24T18:56:21,245 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/3087356085d448e690a0ad74947b1701, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/abf0203130234a3895f51a3ae6a27fd4, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/0621032bb8844e5fba08fdf7266f99f2] into tmpdir=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp, totalSize=151.1 K 2024-11-24T18:56:21,245 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3087356085d448e690a0ad74947b1701, keycount=105, bloomtype=ROW, size=116.1 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1732474542689 2024-11-24T18:56:21,246 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.Compactor(225): Compacting abf0203130234a3895f51a3ae6a27fd4, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1732474581161 2024-11-24T18:56:21,246 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0621032bb8844e5fba08fdf7266f99f2, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732474581188 2024-11-24T18:56:21,260 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4767344858182c51ec061a20b9011b4b#info#compaction#81 average throughput is 44.12 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T18:56:21,261 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/6a12799a474b4672a14112a05bfe600b is 1080, key is row0062/info:/1732474542689/Put/seqid=0 2024-11-24T18:56:21,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741867_1043 (size=145078) 2024-11-24T18:56:21,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741867_1043 (size=145078) 2024-11-24T18:56:21,505 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:21,523 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:21,674 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/6a12799a474b4672a14112a05bfe600b as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/6a12799a474b4672a14112a05bfe600b 2024-11-24T18:56:21,681 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4767344858182c51ec061a20b9011b4b/info of 4767344858182c51ec061a20b9011b4b into 6a12799a474b4672a14112a05bfe600b(size=141.7 K), total size for store is 141.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T18:56:21,681 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4767344858182c51ec061a20b9011b4b: 2024-11-24T18:56:21,681 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b., storeName=4767344858182c51ec061a20b9011b4b/info, priority=13, startTime=1732474581243; duration=0sec 2024-11-24T18:56:21,681 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T18:56:21,681 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4767344858182c51ec061a20b9011b4b:info 2024-11-24T18:56:22,506 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:22,524 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:56:23,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38787 {}] regionserver.HRegion(8855): Flush requested on 4767344858182c51ec061a20b9011b4b 2024-11-24T18:56:23,237 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4767344858182c51ec061a20b9011b4b 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T18:56:23,242 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/a3c4cbfc05274b2e83b214e6d0ccbd8b is 1080, key is row0191/info:/1732474581221/Put/seqid=0 2024-11-24T18:56:23,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741868_1044 (size=12519) 2024-11-24T18:56:23,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741868_1044 (size=12519) 2024-11-24T18:56:23,258 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/a3c4cbfc05274b2e83b214e6d0ccbd8b 2024-11-24T18:56:23,265 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/a3c4cbfc05274b2e83b214e6d0ccbd8b as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/a3c4cbfc05274b2e83b214e6d0ccbd8b 2024-11-24T18:56:23,271 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/a3c4cbfc05274b2e83b214e6d0ccbd8b, entries=7, sequenceid=259, filesize=12.2 K 2024-11-24T18:56:23,272 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=16.81 KB/17216 for 4767344858182c51ec061a20b9011b4b in 34ms, sequenceid=259, compaction requested=false 2024-11-24T18:56:23,272 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4767344858182c51ec061a20b9011b4b: 2024-11-24T18:56:23,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38787 {}] regionserver.HRegion(8855): Flush requested on 4767344858182c51ec061a20b9011b4b 2024-11-24T18:56:23,274 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4767344858182c51ec061a20b9011b4b 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-11-24T18:56:23,278 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/3940ae73c01b43288fcdf130e118021a is 1080, key is row0198/info:/1732474583239/Put/seqid=0 2024-11-24T18:56:23,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to 
blk_1073741869_1045 (size=24412) 2024-11-24T18:56:23,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741869_1045 (size=24412) 2024-11-24T18:56:23,286 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/3940ae73c01b43288fcdf130e118021a 2024-11-24T18:56:23,293 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/3940ae73c01b43288fcdf130e118021a as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/3940ae73c01b43288fcdf130e118021a 2024-11-24T18:56:23,298 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/3940ae73c01b43288fcdf130e118021a, entries=18, sequenceid=280, filesize=23.8 K 2024-11-24T18:56:23,299 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=9.46 KB/9684 for 4767344858182c51ec061a20b9011b4b in 26ms, sequenceid=280, compaction requested=true 2024-11-24T18:56:23,299 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4767344858182c51ec061a20b9011b4b: 2024-11-24T18:56:23,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4767344858182c51ec061a20b9011b4b:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T18:56:23,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T18:56:23,299 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T18:56:23,300 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 182009 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T18:56:23,300 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HStore(1541): 4767344858182c51ec061a20b9011b4b/info is initiating minor compaction (all files) 2024-11-24T18:56:23,300 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4767344858182c51ec061a20b9011b4b/info in TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b. 
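The recurring "Failed invocation for hdfs://.../WALs/..." warnings throughout this section come from RecoverLeaseFSUtils probing whether the old WAL file is already closed: the stack traces show the isFileClosed call going through Method.invoke, and because the underlying DFSClient reports "Filesystem closed", the probe surfaces as an InvocationTargetException wrapping that IOException. A minimal sketch of a reflective probe of this kind follows, with illustrative names and no claim to match the HBase source exactly.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of a reflective isFileClosed probe: the method is resolved by name, so this
// sketch compiles against FileSystem implementations that do not declare isFileClosed.
public class IsFileClosedProbe {
  static Boolean probe(FileSystem fs, Path wal) {
    try {
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, wal);
    } catch (NoSuchMethodException e) {
      return null; // filesystem has no isFileClosed; caller falls back to lease-recovery retries
    } catch (IllegalAccessException | InvocationTargetException e) {
      // A closed DFSClient shows up here as InvocationTargetException caused by
      // java.io.IOException: Filesystem closed, matching the warnings logged above.
      return null;
    }
  }
}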
2024-11-24T18:56:23,301 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/6a12799a474b4672a14112a05bfe600b, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/a3c4cbfc05274b2e83b214e6d0ccbd8b, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/3940ae73c01b43288fcdf130e118021a] into tmpdir=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp, totalSize=177.7 K 2024-11-24T18:56:23,301 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6a12799a474b4672a14112a05bfe600b, keycount=129, bloomtype=ROW, size=141.7 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732474542689 2024-11-24T18:56:23,301 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.Compactor(225): Compacting a3c4cbfc05274b2e83b214e6d0ccbd8b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732474581221 2024-11-24T18:56:23,302 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3940ae73c01b43288fcdf130e118021a, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732474583239 2024-11-24T18:56:23,316 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4767344858182c51ec061a20b9011b4b#info#compaction#84 average throughput is 39.51 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T18:56:23,317 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/734185fcd7a24e1f846380bcf93ee280 is 1080, key is row0062/info:/1732474542689/Put/seqid=0 2024-11-24T18:56:23,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741870_1046 (size=172159) 2024-11-24T18:56:23,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741870_1046 (size=172159) 2024-11-24T18:56:23,326 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/734185fcd7a24e1f846380bcf93ee280 as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/734185fcd7a24e1f846380bcf93ee280 2024-11-24T18:56:23,333 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4767344858182c51ec061a20b9011b4b/info of 4767344858182c51ec061a20b9011b4b into 734185fcd7a24e1f846380bcf93ee280(size=168.1 K), total size for store is 168.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T18:56:23,333 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4767344858182c51ec061a20b9011b4b: 2024-11-24T18:56:23,333 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b., storeName=4767344858182c51ec061a20b9011b4b/info, priority=13, startTime=1732474583299; duration=0sec 2024-11-24T18:56:23,333 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T18:56:23,333 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4767344858182c51ec061a20b9011b4b:info 2024-11-24T18:56:23,507 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:23,525 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:24,507 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:24,526 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:25,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38787 {}] regionserver.HRegion(8855): Flush requested on 4767344858182c51ec061a20b9011b4b 2024-11-24T18:56:25,296 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4767344858182c51ec061a20b9011b4b 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-24T18:56:25,301 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/b7675966329b4acd8dc0c1ec5ceeea70 is 1080, key is row0216/info:/1732474583275/Put/seqid=0 2024-11-24T18:56:25,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741871_1047 (size=15760) 2024-11-24T18:56:25,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741871_1047 (size=15760) 2024-11-24T18:56:25,328 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/b7675966329b4acd8dc0c1ec5ceeea70 2024-11-24T18:56:25,334 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/b7675966329b4acd8dc0c1ec5ceeea70 as 
hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/b7675966329b4acd8dc0c1ec5ceeea70 2024-11-24T18:56:25,336 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38787 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=4767344858182c51ec061a20b9011b4b, server=f2b92657890a,38787,1732474529015 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-24T18:56:25,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38787 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:40934 deadline: 1732474595336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=4767344858182c51ec061a20b9011b4b, server=f2b92657890a,38787,1732474529015 2024-11-24T18:56:25,337 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b., hostname=f2b92657890a,38787,1732474529015, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b., hostname=f2b92657890a,38787,1732474529015, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=4767344858182c51ec061a20b9011b4b, server=f2b92657890a,38787,1732474529015 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T18:56:25,337 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b., hostname=f2b92657890a,38787,1732474529015, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=4767344858182c51ec061a20b9011b4b, server=f2b92657890a,38787,1732474529015 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T18:56:25,337 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b., hostname=f2b92657890a,38787,1732474529015, seqNum=127 because the exception is null or not the one we care about 2024-11-24T18:56:25,340 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/b7675966329b4acd8dc0c1ec5ceeea70, entries=10, sequenceid=294, filesize=15.4 K 2024-11-24T18:56:25,341 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=19.96 KB/20444 for 4767344858182c51ec061a20b9011b4b in 45ms, sequenceid=294, compaction requested=false 2024-11-24T18:56:25,341 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4767344858182c51ec061a20b9011b4b: 2024-11-24T18:56:25,508 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:25,526 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:26,329 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,329 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,329 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,329 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,329 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,330 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,331 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,331 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,362 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,362 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,362 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,362 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,363 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,363 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,367 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,367 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,367 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,370 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,508 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:26,527 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:26,878 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T18:56:26,879 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,879 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,879 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,880 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,880 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,880 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,881 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,882 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,911 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,911 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,911 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,917 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,917 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,917 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:26,920 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:27,509 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:27,527 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:56:28,510 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:28,528 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:28,826 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T18:56:29,510 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:29,528 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:30,502 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 206a079f6509e9ab6eaf2e5852d1e5e8, had cached 0 bytes from a total of 70862 2024-11-24T18:56:30,511 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:30,528 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 4767344858182c51ec061a20b9011b4b, had cached 0 bytes from a total of 187919 2024-11-24T18:56:30,529 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:31,512 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:31,529 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:32,512 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:32,530 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:33,513 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:33,548 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:34,513 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:34,549 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:35,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38787 {}] regionserver.HRegion(8855): Flush requested on 4767344858182c51ec061a20b9011b4b 2024-11-24T18:56:35,424 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4767344858182c51ec061a20b9011b4b 1/1 column families, dataSize=21.02 KB heapSize=22.75 KB 2024-11-24T18:56:35,430 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/72c995d03b4246f8a203001e7699d4db is 1080, key is row0226/info:/1732474585297/Put/seqid=0 2024-11-24T18:56:35,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741872_1048 (size=26570) 2024-11-24T18:56:35,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741872_1048 (size=26570) 2024-11-24T18:56:35,437 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=21.02 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/72c995d03b4246f8a203001e7699d4db 2024-11-24T18:56:35,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/72c995d03b4246f8a203001e7699d4db as 
hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/72c995d03b4246f8a203001e7699d4db 2024-11-24T18:56:35,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38787 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=4767344858182c51ec061a20b9011b4b, server=f2b92657890a,38787,1732474529015 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-24T18:56:35,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38787 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:40934 deadline: 1732474605446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=4767344858182c51ec061a20b9011b4b, server=f2b92657890a,38787,1732474529015 2024-11-24T18:56:35,447 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b., hostname=f2b92657890a,38787,1732474529015, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b., hostname=f2b92657890a,38787,1732474529015, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=4767344858182c51ec061a20b9011b4b, server=f2b92657890a,38787,1732474529015 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T18:56:35,447 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b., hostname=f2b92657890a,38787,1732474529015, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=4767344858182c51ec061a20b9011b4b, server=f2b92657890a,38787,1732474529015 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T18:56:35,447 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b., hostname=f2b92657890a,38787,1732474529015, seqNum=127 because the exception is null or not the one we care about 2024-11-24T18:56:35,449 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/72c995d03b4246f8a203001e7699d4db, entries=20, sequenceid=317, filesize=25.9 K 2024-11-24T18:56:35,450 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~21.02 KB/21520, heapSize ~22.73 KB/23280, currentSize=9.46 KB/9684 for 4767344858182c51ec061a20b9011b4b in 26ms, sequenceid=317, compaction requested=true 2024-11-24T18:56:35,450 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4767344858182c51ec061a20b9011b4b: 2024-11-24T18:56:35,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4767344858182c51ec061a20b9011b4b:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T18:56:35,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T18:56:35,450 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T18:56:35,452 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 214489 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T18:56:35,452 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] 
regionserver.HStore(1541): 4767344858182c51ec061a20b9011b4b/info is initiating minor compaction (all files) 2024-11-24T18:56:35,452 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4767344858182c51ec061a20b9011b4b/info in TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b. 2024-11-24T18:56:35,452 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/734185fcd7a24e1f846380bcf93ee280, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/b7675966329b4acd8dc0c1ec5ceeea70, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/72c995d03b4246f8a203001e7699d4db] into tmpdir=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp, totalSize=209.5 K 2024-11-24T18:56:35,452 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.Compactor(225): Compacting 734185fcd7a24e1f846380bcf93ee280, keycount=154, bloomtype=ROW, size=168.1 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732474542689 2024-11-24T18:56:35,453 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.Compactor(225): Compacting b7675966329b4acd8dc0c1ec5ceeea70, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732474583275 2024-11-24T18:56:35,453 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] compactions.Compactor(225): Compacting 72c995d03b4246f8a203001e7699d4db, keycount=20, bloomtype=ROW, size=25.9 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732474585297 2024-11-24T18:56:35,466 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4767344858182c51ec061a20b9011b4b#info#compaction#87 average throughput is 47.20 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T18:56:35,466 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/b0911f0faaaa4ee69a64a11a9f8e1b1a is 1080, key is row0062/info:/1732474542689/Put/seqid=0 2024-11-24T18:56:35,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741873_1049 (size=204708) 2024-11-24T18:56:35,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741873_1049 (size=204708) 2024-11-24T18:56:35,479 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/b0911f0faaaa4ee69a64a11a9f8e1b1a as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/b0911f0faaaa4ee69a64a11a9f8e1b1a 2024-11-24T18:56:35,486 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4767344858182c51ec061a20b9011b4b/info of 4767344858182c51ec061a20b9011b4b into b0911f0faaaa4ee69a64a11a9f8e1b1a(size=199.9 K), total size for store is 199.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T18:56:35,486 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4767344858182c51ec061a20b9011b4b: 2024-11-24T18:56:35,486 INFO [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b., storeName=4767344858182c51ec061a20b9011b4b/info, priority=13, startTime=1732474595450; duration=0sec 2024-11-24T18:56:35,486 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T18:56:35,486 DEBUG [RS:0;f2b92657890a:38787-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4767344858182c51ec061a20b9011b4b:info 2024-11-24T18:56:35,514 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:35,549 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:36,515 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:36,550 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:37,516 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:37,551 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:38,516 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:38,551 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:39,517 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:39,552 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:40,517 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:40,552 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:41,518 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:41,553 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:42,519 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:42,554 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:43,520 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:43,554 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:44,520 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:44,555 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:45,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38787 {}] regionserver.HRegion(8855): Flush requested on 4767344858182c51ec061a20b9011b4b 2024-11-24T18:56:45,463 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4767344858182c51ec061a20b9011b4b 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-24T18:56:45,469 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/e3f9d4db906d4ff9aecdc33947f9a663 is 1080, key is row0246/info:/1732474595426/Put/seqid=0 2024-11-24T18:56:45,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741874_1050 (size=15760) 2024-11-24T18:56:45,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741874_1050 (size=15760) 2024-11-24T18:56:45,474 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/e3f9d4db906d4ff9aecdc33947f9a663 2024-11-24T18:56:45,480 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/e3f9d4db906d4ff9aecdc33947f9a663 as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/e3f9d4db906d4ff9aecdc33947f9a663 2024-11-24T18:56:45,486 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/e3f9d4db906d4ff9aecdc33947f9a663, entries=10, sequenceid=331, filesize=15.4 K 2024-11-24T18:56:45,487 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=1.05 KB/1076 for 4767344858182c51ec061a20b9011b4b in 24ms, sequenceid=331, compaction requested=false 2024-11-24T18:56:45,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4767344858182c51ec061a20b9011b4b: 2024-11-24T18:56:45,521 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:45,556 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:46,522 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:46,556 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:47,467 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-24T18:56:47,467 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C38787%2C1732474529015.1732474607467 2024-11-24T18:56:47,522 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:47,557 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:56:47,679 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:47,679 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:47,679 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:47,679 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:47,680 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:47,680 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/WALs/f2b92657890a,38787,1732474529015/f2b92657890a%2C38787%2C1732474529015.1732474529635 with entries=312, filesize=308.35 KB; new WAL /user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/WALs/f2b92657890a,38787,1732474529015/f2b92657890a%2C38787%2C1732474529015.1732474607467 2024-11-24T18:56:47,681 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39541:39541),(127.0.0.1/127.0.0.1:32941:32941)] 2024-11-24T18:56:47,681 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/WALs/f2b92657890a,38787,1732474529015/f2b92657890a%2C38787%2C1732474529015.1732474529635 is not closed yet, will try archiving it next time 2024-11-24T18:56:47,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741833_1009 (size=315761) 2024-11-24T18:56:47,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741833_1009 (size=315761) 2024-11-24T18:56:47,686 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-24T18:56:47,691 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/hbase/meta/1588230740/.tmp/info/a8a4e48a577548548794016fb4f568ab is 193, key is TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b./info:regioninfo/1732474545561/Put/seqid=0 2024-11-24T18:56:47,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741876_1052 (size=6223) 2024-11-24T18:56:47,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741876_1052 (size=6223) 2024-11-24T18:56:47,696 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/hbase/meta/1588230740/.tmp/info/a8a4e48a577548548794016fb4f568ab 2024-11-24T18:56:47,702 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/hbase/meta/1588230740/.tmp/info/a8a4e48a577548548794016fb4f568ab as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/hbase/meta/1588230740/info/a8a4e48a577548548794016fb4f568ab 2024-11-24T18:56:47,708 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/hbase/meta/1588230740/info/a8a4e48a577548548794016fb4f568ab, entries=5, sequenceid=21, filesize=6.1 K 2024-11-24T18:56:47,709 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 23ms, sequenceid=21, compaction requested=false 2024-11-24T18:56:47,709 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-24T18:56:47,709 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 206a079f6509e9ab6eaf2e5852d1e5e8: 2024-11-24T18:56:47,709 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 4767344858182c51ec061a20b9011b4b 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-24T18:56:47,713 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/3c1fc1b7c6654593a17738d26363f387 is 1080, key is row0256/info:/1732474605465/Put/seqid=0 2024-11-24T18:56:47,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741877_1053 (size=6035) 2024-11-24T18:56:47,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741877_1053 (size=6035) 2024-11-24T18:56:47,718 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/3c1fc1b7c6654593a17738d26363f387 2024-11-24T18:56:47,724 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/.tmp/info/3c1fc1b7c6654593a17738d26363f387 as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/3c1fc1b7c6654593a17738d26363f387 2024-11-24T18:56:47,729 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/3c1fc1b7c6654593a17738d26363f387, entries=1, sequenceid=335, filesize=5.9 K 2024-11-24T18:56:47,730 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 4767344858182c51ec061a20b9011b4b in 21ms, sequenceid=335, compaction requested=true 2024-11-24T18:56:47,730 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 4767344858182c51ec061a20b9011b4b: 2024-11-24T18:56:47,730 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C38787%2C1732474529015.1732474607730 2024-11-24T18:56:47,735 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:47,735 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:47,735 INFO [sync.2 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:47,735 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:47,735 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:47,735 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/WALs/f2b92657890a,38787,1732474529015/f2b92657890a%2C38787%2C1732474529015.1732474607467 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/WALs/f2b92657890a,38787,1732474529015/f2b92657890a%2C38787%2C1732474529015.1732474607730 2024-11-24T18:56:47,736 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32941:32941),(127.0.0.1/127.0.0.1:39541:39541)] 2024-11-24T18:56:47,736 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/WALs/f2b92657890a,38787,1732474529015/f2b92657890a%2C38787%2C1732474529015.1732474607467 is not closed yet, will try archiving it next time 2024-11-24T18:56:47,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741875_1051 (size=731) 2024-11-24T18:56:47,736 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/WALs/f2b92657890a,38787,1732474529015/f2b92657890a%2C38787%2C1732474529015.1732474529635 to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/oldWALs/f2b92657890a%2C38787%2C1732474529015.1732474529635 2024-11-24T18:56:47,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741875_1051 (size=731) 2024-11-24T18:56:47,737 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-24T18:56:47,738 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/WALs/f2b92657890a,38787,1732474529015/f2b92657890a%2C38787%2C1732474529015.1732474607467 to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/oldWALs/f2b92657890a%2C38787%2C1732474529015.1732474607467 2024-11-24T18:56:47,837 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T18:56:47,837 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
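
[Editor's note] The Close-WAL-Writer warnings repeated above (one per second per WAL, each ending in "java.io.IOException: Filesystem closed") come from the lease-recovery helper probing the NameNode via reflection until the old WAL file is reported closed; because the DFS client behind the filesystem was already shut down, the probe can never succeed. The following is only an illustrative sketch of that probe-and-retry shape in plain Java, assuming a DistributedFileSystem-backed FileSystem; the class and method names (FileClosedProbe, waitUntilClosed) are ours, not HBase's RecoverLeaseFSUtils.

import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class FileClosedProbe {
  // Look up isFileClosed reflectively (it is declared on DistributedFileSystem,
  // not on the FileSystem base class) and retry until it reports true or we time out.
  static boolean waitUntilClosed(FileSystem fs, Path walFile, long timeoutMs) throws InterruptedException {
    Method isFileClosed;
    try {
      isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false; // not an HDFS filesystem, nothing to probe
    }
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        if ((Boolean) isFileClosed.invoke(fs, walFile)) {
          return true;
        }
      } catch (ReflectiveOperationException e) {
        // In the log above, e.getCause() is "java.io.IOException: Filesystem closed":
        // the DFSClient was already closed, so every probe fails the same way.
      }
      Thread.sleep(1000L); // matches the ~1 s cadence of the warnings above
    }
    return false;
  }
}
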
2024-11-24T18:56:47,838 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T18:56:47,838 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:56:47,838 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:56:47,838 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
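
[Editor's note] The call stack logged just above shows where this shutdown originates: AbstractTestLogRolling.tearDown() invokes HBaseTestingUtil.shutdownMiniCluster(), which first closes the shared async connection (the "Connection has been closed" lines) and then stops the HBase and HDFS miniclusters. A minimal sketch of such a tearDown follows; the field name TEST_UTIL and the no-argument HBaseTestingUtil constructor are assumptions, not a copy of the actual test source.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public class LogRollingTearDownSketch {
  // Assumed to be the same utility instance that started the minicluster in setUp().
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Closes the cluster connection, then shuts down HBase and the HDFS minicluster,
    // producing the "Shutting down minicluster" sequence seen above.
    TEST_UTIL.shutdownMiniCluster();
  }
}
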
2024-11-24T18:56:47,838 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T18:56:47,838 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=760219792, stopped=false 2024-11-24T18:56:47,838 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=f2b92657890a,39497,1732474528844 2024-11-24T18:56:47,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T18:56:47,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38787-0x1016e335fe80001, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T18:56:47,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:56:47,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38787-0x1016e335fe80001, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:56:47,851 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T18:56:47,851 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T18:56:47,851 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T18:56:47,851 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:56:47,852 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'f2b92657890a,38787,1732474529015' ***** 2024-11-24T18:56:47,852 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T18:56:47,852 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38787-0x1016e335fe80001, quorum=127.0.0.1:57443, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T18:56:47,852 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T18:56:47,852 INFO [RS:0;f2b92657890a:38787 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T18:56:47,852 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T18:56:47,853 INFO [RS:0;f2b92657890a:38787 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T18:56:47,853 INFO [RS:0;f2b92657890a:38787 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
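
[Editor's note] The NodeDeleted events for /hbase/running above are how the master and region server learn that cluster shutdown was requested: each keeps a ZooKeeper watch on that znode and reacts when it disappears. A rough sketch of that watch pattern is below, using the quorum address from the log; the class and field names are illustrative and not HBase's ZKWatcher implementation.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningNodeWatchSketch implements Watcher {
  private final ZooKeeper zk;

  RunningNodeWatchSketch() throws Exception {
    // Quorum string taken from the log lines above.
    zk = new ZooKeeper("127.0.0.1:57443", 30000, this);
    zk.exists("/hbase/running", this); // arm the watch
  }

  @Override
  public void process(WatchedEvent event) {
    if (event.getType() == Event.EventType.NodeDeleted
        && "/hbase/running".equals(event.getPath())) {
      System.out.println("Cluster shutdown requested; begin stopping services");
    }
    try {
      zk.exists("/hbase/running", this); // ZooKeeper watches are one-shot, re-register
    } catch (Exception ignore) {
    }
  }
}
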
2024-11-24T18:56:47,853 INFO [RS:0;f2b92657890a:38787 {}] regionserver.HRegionServer(3091): Received CLOSE for 206a079f6509e9ab6eaf2e5852d1e5e8 2024-11-24T18:56:47,853 INFO [RS:0;f2b92657890a:38787 {}] regionserver.HRegionServer(3091): Received CLOSE for 4767344858182c51ec061a20b9011b4b 2024-11-24T18:56:47,853 INFO [RS:0;f2b92657890a:38787 {}] regionserver.HRegionServer(959): stopping server f2b92657890a,38787,1732474529015 2024-11-24T18:56:47,853 INFO [RS:0;f2b92657890a:38787 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T18:56:47,853 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 206a079f6509e9ab6eaf2e5852d1e5e8, disabling compactions & flushes 2024-11-24T18:56:47,853 INFO [RS:0;f2b92657890a:38787 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;f2b92657890a:38787. 2024-11-24T18:56:47,853 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732474544798.206a079f6509e9ab6eaf2e5852d1e5e8. 2024-11-24T18:56:47,853 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732474544798.206a079f6509e9ab6eaf2e5852d1e5e8. 2024-11-24T18:56:47,853 DEBUG [RS:0;f2b92657890a:38787 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T18:56:47,853 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732474544798.206a079f6509e9ab6eaf2e5852d1e5e8. after waiting 0 ms 2024-11-24T18:56:47,853 DEBUG [RS:0;f2b92657890a:38787 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:56:47,853 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732474544798.206a079f6509e9ab6eaf2e5852d1e5e8. 2024-11-24T18:56:47,853 INFO [RS:0;f2b92657890a:38787 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-24T18:56:47,853 INFO [RS:0;f2b92657890a:38787 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T18:56:47,853 INFO [RS:0;f2b92657890a:38787 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T18:56:47,854 INFO [RS:0;f2b92657890a:38787 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T18:56:47,854 INFO [RS:0;f2b92657890a:38787 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-24T18:56:47,854 DEBUG [RS:0;f2b92657890a:38787 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 206a079f6509e9ab6eaf2e5852d1e5e8=TestLogRolling-testLogRolling,,1732474544798.206a079f6509e9ab6eaf2e5852d1e5e8., 4767344858182c51ec061a20b9011b4b=TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b.} 2024-11-24T18:56:47,854 DEBUG [RS:0;f2b92657890a:38787 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 206a079f6509e9ab6eaf2e5852d1e5e8, 4767344858182c51ec061a20b9011b4b 2024-11-24T18:56:47,854 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T18:56:47,854 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T18:56:47,854 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T18:56:47,854 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T18:56:47,854 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732474544798.206a079f6509e9ab6eaf2e5852d1e5e8.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/206a079f6509e9ab6eaf2e5852d1e5e8/info/d70f4cd3b88a497d865b6ea27a60f4ed.39086109d05e3e13c2ae0a9ad08f12fd->hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/d70f4cd3b88a497d865b6ea27a60f4ed-bottom] to archive 2024-11-24T18:56:47,854 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T18:56:47,855 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732474544798.206a079f6509e9ab6eaf2e5852d1e5e8.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
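
[Editor's note] The HFileArchiver entries that follow move each compacted store file from .../data/<namespace>/<table>/<region>/<family>/ to the same relative location under .../archive/, as the source and destination paths in the log show. A small illustrative helper for that path mapping is sketched below; toArchivePath is our name, not an HBase API.

import org.apache.hadoop.fs.Path;

public final class ArchivePathSketch {
  // Map a store file under <rootDir>/data/... to <rootDir>/archive/data/...,
  // preserving the relative namespace/table/region/family/file layout.
  static Path toArchivePath(Path rootDir, Path storeFile) {
    String relative = storeFile.toUri().getPath()
        .substring(rootDir.toUri().getPath().length() + 1); // e.g. data/default/<table>/<region>/info/<file>
    return new Path(new Path(rootDir, "archive"), relative);
  }

  public static void main(String[] args) {
    Path root = new Path("hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35");
    Path hfile = new Path(root,
        "data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/b52819689fd1401595cf36002beb852f");
    // Prints the archive/data/default/... destination in the same form as the log entries below.
    System.out.println(toArchivePath(root, hfile));
  }
}
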
2024-11-24T18:56:47,858 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732474544798.206a079f6509e9ab6eaf2e5852d1e5e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/206a079f6509e9ab6eaf2e5852d1e5e8/info/d70f4cd3b88a497d865b6ea27a60f4ed.39086109d05e3e13c2ae0a9ad08f12fd to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/archive/data/default/TestLogRolling-testLogRolling/206a079f6509e9ab6eaf2e5852d1e5e8/info/d70f4cd3b88a497d865b6ea27a60f4ed.39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:56:47,858 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732474544798.206a079f6509e9ab6eaf2e5852d1e5e8.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=f2b92657890a:39497 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-24T18:56:47,859 WARN [StoreCloser-TestLogRolling-testLogRolling,,1732474544798.206a079f6509e9ab6eaf2e5852d1e5e8.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-24T18:56:47,860 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-24T18:56:47,860 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T18:56:47,861 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T18:56:47,861 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732474607854Running coprocessor pre-close hooks at 1732474607854Disabling compacts and flushes for region at 1732474607854Disabling writes for close at 1732474607854Writing region close event to WAL at 1732474607856 (+2 ms)Running coprocessor post-close hooks at 1732474607860 (+4 ms)Closed at 1732474607860 2024-11-24T18:56:47,861 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T18:56:47,862 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/206a079f6509e9ab6eaf2e5852d1e5e8/recovered.edits/131.seqid, newMaxSeqId=131, maxSeqId=126 2024-11-24T18:56:47,863 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732474544798.206a079f6509e9ab6eaf2e5852d1e5e8. 
2024-11-24T18:56:47,863 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 206a079f6509e9ab6eaf2e5852d1e5e8: Waiting for close lock at 1732474607853Running coprocessor pre-close hooks at 1732474607853Disabling compacts and flushes for region at 1732474607853Disabling writes for close at 1732474607853Writing region close event to WAL at 1732474607859 (+6 ms)Running coprocessor post-close hooks at 1732474607863 (+4 ms)Closed at 1732474607863 2024-11-24T18:56:47,863 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1732474544798.206a079f6509e9ab6eaf2e5852d1e5e8. 2024-11-24T18:56:47,863 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4767344858182c51ec061a20b9011b4b, disabling compactions & flushes 2024-11-24T18:56:47,863 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b. 2024-11-24T18:56:47,863 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b. 2024-11-24T18:56:47,863 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b. after waiting 0 ms 2024-11-24T18:56:47,863 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b. 
2024-11-24T18:56:47,864 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/d70f4cd3b88a497d865b6ea27a60f4ed.39086109d05e3e13c2ae0a9ad08f12fd->hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/39086109d05e3e13c2ae0a9ad08f12fd/info/d70f4cd3b88a497d865b6ea27a60f4ed-top, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/TestLogRolling-testLogRolling=39086109d05e3e13c2ae0a9ad08f12fd-834c878770fc45baa52294c92f4058e4, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/b52819689fd1401595cf36002beb852f, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/TestLogRolling-testLogRolling=39086109d05e3e13c2ae0a9ad08f12fd-727c23d250fd4509b96cfd72a766ccac, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/b9ba810b5e8a4218a4f764de0ee61e70, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/ace5f0f8fe734b76ab9c067ce0c8846a, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/d20a1ac108ac43428a4bee7f5c55b3a9, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/72dfcb0afe434fa391204141a94775b7, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/1f2a6ea9b78147dead3e8b40073916f2, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/2fda97c21de648be95199d03e7f54c32, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/28db862bd5914e94ad8236c41443a4fe, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/3087356085d448e690a0ad74947b1701, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/c62e1f6727d34350a53c49ff4f7d0cad, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/abf0203130234a3895f51a3ae6a27fd4, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/6a12799a474b4672a14112a05bfe600b, 
hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/0621032bb8844e5fba08fdf7266f99f2, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/a3c4cbfc05274b2e83b214e6d0ccbd8b, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/734185fcd7a24e1f846380bcf93ee280, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/3940ae73c01b43288fcdf130e118021a, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/b7675966329b4acd8dc0c1ec5ceeea70, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/72c995d03b4246f8a203001e7699d4db] to archive 2024-11-24T18:56:47,865 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-24T18:56:47,866 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/d70f4cd3b88a497d865b6ea27a60f4ed.39086109d05e3e13c2ae0a9ad08f12fd to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/archive/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/d70f4cd3b88a497d865b6ea27a60f4ed.39086109d05e3e13c2ae0a9ad08f12fd 2024-11-24T18:56:47,867 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/TestLogRolling-testLogRolling=39086109d05e3e13c2ae0a9ad08f12fd-834c878770fc45baa52294c92f4058e4 to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/archive/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/TestLogRolling-testLogRolling=39086109d05e3e13c2ae0a9ad08f12fd-834c878770fc45baa52294c92f4058e4 2024-11-24T18:56:47,869 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/b52819689fd1401595cf36002beb852f to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/archive/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/b52819689fd1401595cf36002beb852f 2024-11-24T18:56:47,870 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b.-1 {}] backup.HFileArchiver(596): Archived 
from FileableStoreFile, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/TestLogRolling-testLogRolling=39086109d05e3e13c2ae0a9ad08f12fd-727c23d250fd4509b96cfd72a766ccac to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/archive/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/TestLogRolling-testLogRolling=39086109d05e3e13c2ae0a9ad08f12fd-727c23d250fd4509b96cfd72a766ccac 2024-11-24T18:56:47,871 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/b9ba810b5e8a4218a4f764de0ee61e70 to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/archive/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/b9ba810b5e8a4218a4f764de0ee61e70 2024-11-24T18:56:47,872 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/ace5f0f8fe734b76ab9c067ce0c8846a to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/archive/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/ace5f0f8fe734b76ab9c067ce0c8846a 2024-11-24T18:56:47,873 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/d20a1ac108ac43428a4bee7f5c55b3a9 to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/archive/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/d20a1ac108ac43428a4bee7f5c55b3a9 2024-11-24T18:56:47,875 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/72dfcb0afe434fa391204141a94775b7 to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/archive/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/72dfcb0afe434fa391204141a94775b7 2024-11-24T18:56:47,876 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/1f2a6ea9b78147dead3e8b40073916f2 to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/archive/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/1f2a6ea9b78147dead3e8b40073916f2 
2024-11-24T18:56:47,877 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/2fda97c21de648be95199d03e7f54c32 to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/archive/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/2fda97c21de648be95199d03e7f54c32 2024-11-24T18:56:47,879 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/28db862bd5914e94ad8236c41443a4fe to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/archive/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/28db862bd5914e94ad8236c41443a4fe 2024-11-24T18:56:47,880 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/3087356085d448e690a0ad74947b1701 to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/archive/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/3087356085d448e690a0ad74947b1701 2024-11-24T18:56:47,882 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/c62e1f6727d34350a53c49ff4f7d0cad to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/archive/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/c62e1f6727d34350a53c49ff4f7d0cad 2024-11-24T18:56:47,883 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/abf0203130234a3895f51a3ae6a27fd4 to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/archive/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/abf0203130234a3895f51a3ae6a27fd4 2024-11-24T18:56:47,884 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/6a12799a474b4672a14112a05bfe600b to 
hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/archive/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/6a12799a474b4672a14112a05bfe600b 2024-11-24T18:56:47,885 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/0621032bb8844e5fba08fdf7266f99f2 to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/archive/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/0621032bb8844e5fba08fdf7266f99f2 2024-11-24T18:56:47,886 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/a3c4cbfc05274b2e83b214e6d0ccbd8b to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/archive/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/a3c4cbfc05274b2e83b214e6d0ccbd8b 2024-11-24T18:56:47,887 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/734185fcd7a24e1f846380bcf93ee280 to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/archive/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/734185fcd7a24e1f846380bcf93ee280 2024-11-24T18:56:47,888 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/3940ae73c01b43288fcdf130e118021a to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/archive/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/3940ae73c01b43288fcdf130e118021a 2024-11-24T18:56:47,889 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/b7675966329b4acd8dc0c1ec5ceeea70 to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/archive/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/b7675966329b4acd8dc0c1ec5ceeea70 2024-11-24T18:56:47,890 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/72c995d03b4246f8a203001e7699d4db to hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/archive/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/info/72c995d03b4246f8a203001e7699d4db 2024-11-24T18:56:47,890 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [b52819689fd1401595cf36002beb852f=42984, b9ba810b5e8a4218a4f764de0ee61e70=12516, ace5f0f8fe734b76ab9c067ce0c8846a=65792, d20a1ac108ac43428a4bee7f5c55b3a9=20078, 72dfcb0afe434fa391204141a94775b7=17906, 1f2a6ea9b78147dead3e8b40073916f2=98311, 2fda97c21de648be95199d03e7f54c32=24394, 28db862bd5914e94ad8236c41443a4fe=17906, 3087356085d448e690a0ad74947b1701=118899, c62e1f6727d34350a53c49ff4f7d0cad=12516, abf0203130234a3895f51a3ae6a27fd4=16828, 6a12799a474b4672a14112a05bfe600b=145078, 0621032bb8844e5fba08fdf7266f99f2=19000, a3c4cbfc05274b2e83b214e6d0ccbd8b=12519, 734185fcd7a24e1f846380bcf93ee280=172159, 3940ae73c01b43288fcdf130e118021a=24412, b7675966329b4acd8dc0c1ec5ceeea70=15760, 72c995d03b4246f8a203001e7699d4db=26570] 2024-11-24T18:56:47,894 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/data/default/TestLogRolling-testLogRolling/4767344858182c51ec061a20b9011b4b/recovered.edits/338.seqid, newMaxSeqId=338, maxSeqId=126 2024-11-24T18:56:47,894 INFO [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b. 2024-11-24T18:56:47,894 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4767344858182c51ec061a20b9011b4b: Waiting for close lock at 1732474607863Running coprocessor pre-close hooks at 1732474607863Disabling compacts and flushes for region at 1732474607863Disabling writes for close at 1732474607863Writing region close event to WAL at 1732474607890 (+27 ms)Running coprocessor post-close hooks at 1732474607894 (+4 ms)Closed at 1732474607894 2024-11-24T18:56:47,894 DEBUG [RS_CLOSE_REGION-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1732474544798.4767344858182c51ec061a20b9011b4b. 2024-11-24T18:56:48,054 INFO [RS:0;f2b92657890a:38787 {}] regionserver.HRegionServer(976): stopping server f2b92657890a,38787,1732474529015; all regions closed. 
2024-11-24T18:56:48,055 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:48,055 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:48,055 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:48,055 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:48,055 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:48,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741834_1010 (size=8107) 2024-11-24T18:56:48,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741834_1010 (size=8107) 2024-11-24T18:56:48,061 DEBUG [RS:0;f2b92657890a:38787 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/oldWALs 2024-11-24T18:56:48,061 INFO [RS:0;f2b92657890a:38787 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f2b92657890a%2C38787%2C1732474529015.meta:.meta(num 1732474530008) 2024-11-24T18:56:48,062 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:48,062 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:48,062 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:48,062 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:48,062 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:48,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741878_1054 (size=780) 2024-11-24T18:56:48,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741878_1054 (size=780) 2024-11-24T18:56:48,066 DEBUG [RS:0;f2b92657890a:38787 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/oldWALs 2024-11-24T18:56:48,066 INFO [RS:0;f2b92657890a:38787 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f2b92657890a%2C38787%2C1732474529015:(num 1732474607730) 2024-11-24T18:56:48,066 DEBUG [RS:0;f2b92657890a:38787 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:56:48,066 INFO [RS:0;f2b92657890a:38787 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T18:56:48,066 INFO [RS:0;f2b92657890a:38787 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T18:56:48,067 INFO [RS:0;f2b92657890a:38787 {}] hbase.ChoreService(370): Chore service for: regionserver/f2b92657890a:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T18:56:48,067 INFO [RS:0;f2b92657890a:38787 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T18:56:48,067 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T18:56:48,067 INFO [RS:0;f2b92657890a:38787 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38787 2024-11-24T18:56:48,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T18:56:48,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38787-0x1016e335fe80001, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/f2b92657890a,38787,1732474529015 2024-11-24T18:56:48,079 INFO [RS:0;f2b92657890a:38787 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T18:56:48,089 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [f2b92657890a,38787,1732474529015] 2024-11-24T18:56:48,100 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/f2b92657890a,38787,1732474529015 already deleted, retry=false 2024-11-24T18:56:48,100 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; f2b92657890a,38787,1732474529015 expired; onlineServers=0 2024-11-24T18:56:48,100 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'f2b92657890a,39497,1732474528844' ***** 2024-11-24T18:56:48,100 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T18:56:48,100 INFO [M:0;f2b92657890a:39497 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T18:56:48,100 INFO [M:0;f2b92657890a:39497 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T18:56:48,100 DEBUG [M:0;f2b92657890a:39497 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T18:56:48,100 DEBUG [M:0;f2b92657890a:39497 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T18:56:48,100 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T18:56:48,100 DEBUG [master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.large.0-1732474529370 {}] cleaner.HFileCleaner(306): Exit Thread[master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.large.0-1732474529370,5,FailOnTimeoutGroup] 2024-11-24T18:56:48,100 DEBUG [master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.small.0-1732474529370 {}] cleaner.HFileCleaner(306): Exit Thread[master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.small.0-1732474529370,5,FailOnTimeoutGroup] 2024-11-24T18:56:48,101 INFO [M:0;f2b92657890a:39497 {}] hbase.ChoreService(370): Chore service for: master/f2b92657890a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T18:56:48,101 INFO [M:0;f2b92657890a:39497 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T18:56:48,101 DEBUG [M:0;f2b92657890a:39497 {}] master.HMaster(1795): Stopping service threads 2024-11-24T18:56:48,101 INFO [M:0;f2b92657890a:39497 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T18:56:48,101 INFO [M:0;f2b92657890a:39497 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T18:56:48,101 INFO [M:0;f2b92657890a:39497 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T18:56:48,101 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T18:56:48,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T18:56:48,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:56:48,111 DEBUG [M:0;f2b92657890a:39497 {}] zookeeper.ZKUtil(347): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T18:56:48,111 WARN [M:0;f2b92657890a:39497 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T18:56:48,111 INFO [M:0;f2b92657890a:39497 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/.lastflushedseqids 2024-11-24T18:56:48,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741879_1055 (size=228) 2024-11-24T18:56:48,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741879_1055 (size=228) 2024-11-24T18:56:48,118 INFO [M:0;f2b92657890a:39497 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T18:56:48,118 INFO [M:0;f2b92657890a:39497 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T18:56:48,118 DEBUG [M:0;f2b92657890a:39497 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T18:56:48,118 INFO [M:0;f2b92657890a:39497 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:56:48,118 DEBUG [M:0;f2b92657890a:39497 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:56:48,118 DEBUG [M:0;f2b92657890a:39497 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T18:56:48,118 DEBUG [M:0;f2b92657890a:39497 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:56:48,118 INFO [M:0;f2b92657890a:39497 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.43 KB heapSize=63.36 KB 2024-11-24T18:56:48,137 DEBUG [M:0;f2b92657890a:39497 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d01fa631075a461f9424022eaf9795b5 is 82, key is hbase:meta,,1/info:regioninfo/1732474530035/Put/seqid=0 2024-11-24T18:56:48,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741880_1056 (size=5672) 2024-11-24T18:56:48,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741880_1056 (size=5672) 2024-11-24T18:56:48,142 INFO [M:0;f2b92657890a:39497 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d01fa631075a461f9424022eaf9795b5 2024-11-24T18:56:48,166 DEBUG [M:0;f2b92657890a:39497 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fb581f19652a4e3cbc2aca01521c0743 is 751, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732474530831/Put/seqid=0 2024-11-24T18:56:48,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741881_1057 (size=7091) 2024-11-24T18:56:48,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741881_1057 (size=7091) 2024-11-24T18:56:48,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38787-0x1016e335fe80001, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T18:56:48,190 INFO [RS:0;f2b92657890a:38787 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T18:56:48,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38787-0x1016e335fe80001, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T18:56:48,190 INFO [RS:0;f2b92657890a:38787 {}] regionserver.HRegionServer(1031): Exiting; stopping=f2b92657890a,38787,1732474529015; zookeeper connection closed. 
2024-11-24T18:56:48,190 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@726c3412 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@726c3412 2024-11-24T18:56:48,190 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T18:56:48,327 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T18:56:48,328 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T18:56:48,328 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-24T18:56:48,523 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:56:48,557 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:56:48,571 INFO [M:0;f2b92657890a:39497 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.83 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fb581f19652a4e3cbc2aca01521c0743 2024-11-24T18:56:48,576 INFO [M:0;f2b92657890a:39497 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for fb581f19652a4e3cbc2aca01521c0743 2024-11-24T18:56:48,591 DEBUG [M:0;f2b92657890a:39497 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6a3d0427281d4a7d98a56f966d6bbac1 is 69, key is f2b92657890a,38787,1732474529015/rs:state/1732474529472/Put/seqid=0 2024-11-24T18:56:48,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741882_1058 (size=5156) 2024-11-24T18:56:48,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741882_1058 (size=5156) 2024-11-24T18:56:48,597 INFO [M:0;f2b92657890a:39497 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6a3d0427281d4a7d98a56f966d6bbac1 2024-11-24T18:56:48,625 DEBUG [M:0;f2b92657890a:39497 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d06f53a470fd4c9093e09a1276cad5bc is 52, key is load_balancer_on/state:d/1732474530452/Put/seqid=0 2024-11-24T18:56:48,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741883_1059 (size=5056) 2024-11-24T18:56:48,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741883_1059 (size=5056) 2024-11-24T18:56:48,634 INFO [M:0;f2b92657890a:39497 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d06f53a470fd4c9093e09a1276cad5bc 2024-11-24T18:56:48,640 DEBUG [M:0;f2b92657890a:39497 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d01fa631075a461f9424022eaf9795b5 as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d01fa631075a461f9424022eaf9795b5 2024-11-24T18:56:48,645 INFO [M:0;f2b92657890a:39497 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d01fa631075a461f9424022eaf9795b5, entries=8, sequenceid=125, filesize=5.5 K 2024-11-24T18:56:48,646 
DEBUG [M:0;f2b92657890a:39497 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fb581f19652a4e3cbc2aca01521c0743 as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fb581f19652a4e3cbc2aca01521c0743 2024-11-24T18:56:48,651 INFO [M:0;f2b92657890a:39497 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for fb581f19652a4e3cbc2aca01521c0743 2024-11-24T18:56:48,651 INFO [M:0;f2b92657890a:39497 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fb581f19652a4e3cbc2aca01521c0743, entries=13, sequenceid=125, filesize=6.9 K 2024-11-24T18:56:48,652 DEBUG [M:0;f2b92657890a:39497 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6a3d0427281d4a7d98a56f966d6bbac1 as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6a3d0427281d4a7d98a56f966d6bbac1 2024-11-24T18:56:48,657 INFO [M:0;f2b92657890a:39497 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6a3d0427281d4a7d98a56f966d6bbac1, entries=1, sequenceid=125, filesize=5.0 K 2024-11-24T18:56:48,658 DEBUG [M:0;f2b92657890a:39497 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d06f53a470fd4c9093e09a1276cad5bc as hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d06f53a470fd4c9093e09a1276cad5bc 2024-11-24T18:56:48,663 INFO [M:0;f2b92657890a:39497 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33185/user/jenkins/test-data/0e0df926-b58a-034c-d2f7-c6a51eb1ed35/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d06f53a470fd4c9093e09a1276cad5bc, entries=1, sequenceid=125, filesize=4.9 K 2024-11-24T18:56:48,665 INFO [M:0;f2b92657890a:39497 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 547ms, sequenceid=125, compaction requested=false 2024-11-24T18:56:48,677 INFO [M:0;f2b92657890a:39497 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T18:56:48,677 DEBUG [M:0;f2b92657890a:39497 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732474608118Disabling compacts and flushes for region at 1732474608118Disabling writes for close at 1732474608118Obtaining lock to block concurrent updates at 1732474608118Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732474608118Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52663, getHeapSize=64816, getOffHeapSize=0, getCellsCount=148 at 1732474608119 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732474608119Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732474608119Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732474608137 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732474608137Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732474608147 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732474608165 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732474608165Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732474608576 (+411 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732474608590 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732474608590Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732474608602 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732474608625 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732474608625Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@31684e57: reopening flushed file at 1732474608639 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@54579dc3: reopening flushed file at 1732474608646 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@643f7553: reopening flushed file at 1732474608651 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@747c4b: reopening flushed file at 1732474608657 (+6 ms)Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 547ms, sequenceid=125, compaction requested=false at 1732474608665 (+8 ms)Writing region close event to WAL at 1732474608677 (+12 ms)Closed at 1732474608677 2024-11-24T18:56:48,678 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:48,678 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:48,678 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:48,678 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:48,678 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:48,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44655 is added to blk_1073741830_1006 (size=61332) 2024-11-24T18:56:48,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46341 is added to blk_1073741830_1006 (size=61332) 2024-11-24T18:56:48,680 INFO [M:0;f2b92657890a:39497 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-24T18:56:48,680 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T18:56:48,681 INFO [M:0;f2b92657890a:39497 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39497 2024-11-24T18:56:48,681 INFO [M:0;f2b92657890a:39497 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T18:56:48,832 INFO [M:0;f2b92657890a:39497 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T18:56:48,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T18:56:48,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39497-0x1016e335fe80000, quorum=127.0.0.1:57443, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T18:56:48,834 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5c3bfe4f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:56:48,834 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5dea9c62{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T18:56:48,834 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T18:56:48,834 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@29765213{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T18:56:48,834 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7d4c2da4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/hadoop.log.dir/,STOPPED} 2024-11-24T18:56:48,836 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T18:56:48,836 WARN [BP-729962942-172.17.0.2-1732474526503 heartbeating to localhost/127.0.0.1:33185 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T18:56:48,836 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T18:56:48,836 WARN [BP-729962942-172.17.0.2-1732474526503 heartbeating to localhost/127.0.0.1:33185 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-729962942-172.17.0.2-1732474526503 (Datanode Uuid 9a2e26e3-8f4c-4eb2-8bb8-052bd469fa20) service to localhost/127.0.0.1:33185 2024-11-24T18:56:48,836 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/cluster_53af99ab-4d80-99f9-07c3-d74bd4d340bf/data/data3/current/BP-729962942-172.17.0.2-1732474526503 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:56:48,837 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/cluster_53af99ab-4d80-99f9-07c3-d74bd4d340bf/data/data4/current/BP-729962942-172.17.0.2-1732474526503 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:56:48,837 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T18:56:48,838 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@25dcc129{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:56:48,839 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1b95b0ea{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T18:56:48,839 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T18:56:48,839 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@767f877d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T18:56:48,839 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77b370f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/hadoop.log.dir/,STOPPED} 2024-11-24T18:56:48,840 WARN [BP-729962942-172.17.0.2-1732474526503 heartbeating to localhost/127.0.0.1:33185 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T18:56:48,840 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T18:56:48,840 WARN [BP-729962942-172.17.0.2-1732474526503 heartbeating to localhost/127.0.0.1:33185 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-729962942-172.17.0.2-1732474526503 (Datanode Uuid 872c0dfe-3149-46fd-b8b4-0b7c9bae5684) service to localhost/127.0.0.1:33185 2024-11-24T18:56:48,840 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T18:56:48,841 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/cluster_53af99ab-4d80-99f9-07c3-d74bd4d340bf/data/data1/current/BP-729962942-172.17.0.2-1732474526503 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:56:48,841 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/cluster_53af99ab-4d80-99f9-07c3-d74bd4d340bf/data/data2/current/BP-729962942-172.17.0.2-1732474526503 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:56:48,841 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T18:56:48,846 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@f63b03b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T18:56:48,847 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@622d58de{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T18:56:48,847 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T18:56:48,847 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4e36d39c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T18:56:48,847 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3bc081d8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/hadoop.log.dir/,STOPPED} 2024-11-24T18:56:48,854 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T18:56:48,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T18:56:48,896 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=228 (was 207) Potentially hanging thread: regionserver/f2b92657890a:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:33185 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33185 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33185 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33185 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33185 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33185 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33185 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33185 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=503 (was 483) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=190 (was 205), ProcessCount=11 (was 11), AvailableMemoryMB=8016 (was 8236) 2024-11-24T18:56:48,905 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=228, OpenFileDescriptor=503, MaxFileDescriptor=1048576, SystemLoadAverage=190, ProcessCount=11, AvailableMemoryMB=8016 2024-11-24T18:56:48,905 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T18:56:48,905 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/hadoop.log.dir so I do NOT create it in target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67 2024-11-24T18:56:48,905 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/882033f2-9673-329f-2c53-86bb3e2b6bbb/hadoop.tmp.dir so I do NOT create it in target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67 2024-11-24T18:56:48,905 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/cluster_182f2636-85e1-4f57-e859-03b49e3286cf, deleteOnExit=true 2024-11-24T18:56:48,905 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T18:56:48,905 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/test.cache.data in system properties and HBase conf 2024-11-24T18:56:48,905 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T18:56:48,906 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/hadoop.log.dir in system properties and HBase conf 2024-11-24T18:56:48,906 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T18:56:48,906 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T18:56:48,906 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T18:56:48,906 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-24T18:56:48,906 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T18:56:48,906 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T18:56:48,906 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T18:56:48,906 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T18:56:48,906 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T18:56:48,906 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T18:56:48,906 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T18:56:48,907 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T18:56:48,907 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T18:56:48,907 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/nfs.dump.dir in system properties and HBase conf 2024-11-24T18:56:48,907 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/java.io.tmpdir in system properties and HBase conf 2024-11-24T18:56:48,907 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T18:56:48,907 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T18:56:48,907 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T18:56:48,920 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T18:56:49,343 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:56:49,347 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T18:56:49,348 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T18:56:49,348 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T18:56:49,348 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T18:56:49,348 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:56:49,352 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d148abe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/hadoop.log.dir/,AVAILABLE} 2024-11-24T18:56:49,352 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@717a950c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T18:56:49,461 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@380ffe40{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/java.io.tmpdir/jetty-localhost-39643-hadoop-hdfs-3_4_1-tests_jar-_-any-2316967794240179587/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T18:56:49,461 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3910812a{HTTP/1.1, (http/1.1)}{localhost:39643} 2024-11-24T18:56:49,461 INFO [Time-limited test {}] server.Server(415): Started @338813ms 2024-11-24T18:56:49,477 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T18:56:49,498 INFO [regionserver/f2b92657890a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T18:56:49,524 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:49,558 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:49,752 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:56:49,755 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T18:56:49,756 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T18:56:49,756 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T18:56:49,756 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T18:56:49,757 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@b0e389f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/hadoop.log.dir/,AVAILABLE} 2024-11-24T18:56:49,757 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e2fef96{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T18:56:49,869 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1040cecb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/java.io.tmpdir/jetty-localhost-35269-hadoop-hdfs-3_4_1-tests_jar-_-any-7214295467908130763/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:56:49,869 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1f1185ee{HTTP/1.1, (http/1.1)}{localhost:35269} 2024-11-24T18:56:49,869 INFO [Time-limited test {}] server.Server(415): Started @339222ms 2024-11-24T18:56:49,870 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T18:56:49,901 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T18:56:49,904 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T18:56:49,904 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T18:56:49,904 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T18:56:49,905 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T18:56:49,905 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@44402286{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/hadoop.log.dir/,AVAILABLE} 2024-11-24T18:56:49,905 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59622fd9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T18:56:50,010 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6bfe0bbd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/java.io.tmpdir/jetty-localhost-39631-hadoop-hdfs-3_4_1-tests_jar-_-any-7217678071878545722/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:56:50,010 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2f95849a{HTTP/1.1, (http/1.1)}{localhost:39631} 2024-11-24T18:56:50,010 INFO [Time-limited test {}] server.Server(415): Started @339362ms 2024-11-24T18:56:50,011 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T18:56:50,524 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:50,558 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T18:56:51,524 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:51,559 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:51,686 WARN [Thread-2495 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/cluster_182f2636-85e1-4f57-e859-03b49e3286cf/data/data2/current/BP-571796543-172.17.0.2-1732474608924/current, will proceed with Du for space computation calculation, 2024-11-24T18:56:51,686 WARN [Thread-2494 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/cluster_182f2636-85e1-4f57-e859-03b49e3286cf/data/data1/current/BP-571796543-172.17.0.2-1732474608924/current, will proceed with Du for space computation calculation, 2024-11-24T18:56:51,711 WARN [Thread-2458 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T18:56:51,713 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x99c50c7234c0d72d with lease ID 0x1c9a7ccfd6ab58a: Processing first storage report for DS-ed8e168c-67ad-4bd3-a4eb-c74bbdce2180 from datanode DatanodeRegistration(127.0.0.1:42655, datanodeUuid=53a84466-1404-4fa0-b64a-99bde38985b7, infoPort=45991, infoSecurePort=0, ipcPort=44149, storageInfo=lv=-57;cid=testClusterID;nsid=294757765;c=1732474608924) 2024-11-24T18:56:51,713 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x99c50c7234c0d72d with lease ID 0x1c9a7ccfd6ab58a: from storage DS-ed8e168c-67ad-4bd3-a4eb-c74bbdce2180 node DatanodeRegistration(127.0.0.1:42655, datanodeUuid=53a84466-1404-4fa0-b64a-99bde38985b7, infoPort=45991, infoSecurePort=0, ipcPort=44149, storageInfo=lv=-57;cid=testClusterID;nsid=294757765;c=1732474608924), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:56:51,713 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x99c50c7234c0d72d with lease ID 0x1c9a7ccfd6ab58a: Processing first storage report for DS-a2311898-cfc1-4b27-88e1-0654575691a2 from datanode DatanodeRegistration(127.0.0.1:42655, datanodeUuid=53a84466-1404-4fa0-b64a-99bde38985b7, infoPort=45991, infoSecurePort=0, ipcPort=44149, storageInfo=lv=-57;cid=testClusterID;nsid=294757765;c=1732474608924) 2024-11-24T18:56:51,713 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x99c50c7234c0d72d with lease ID 0x1c9a7ccfd6ab58a: from storage DS-a2311898-cfc1-4b27-88e1-0654575691a2 node DatanodeRegistration(127.0.0.1:42655, datanodeUuid=53a84466-1404-4fa0-b64a-99bde38985b7, infoPort=45991, infoSecurePort=0, ipcPort=44149, storageInfo=lv=-57;cid=testClusterID;nsid=294757765;c=1732474608924), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:56:51,739 WARN [Thread-2505 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/cluster_182f2636-85e1-4f57-e859-03b49e3286cf/data/data3/current/BP-571796543-172.17.0.2-1732474608924/current, will proceed with Du for space computation calculation, 2024-11-24T18:56:51,739 WARN [Thread-2506 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/cluster_182f2636-85e1-4f57-e859-03b49e3286cf/data/data4/current/BP-571796543-172.17.0.2-1732474608924/current, will proceed with Du for space computation calculation, 2024-11-24T18:56:51,759 WARN [Thread-2481 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T18:56:51,761 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x22c16f07c22cdad1 with lease ID 0x1c9a7ccfd6ab58b: Processing first storage report for DS-a12b1ca8-bf6a-458b-8f77-38c6dd5c4804 from datanode DatanodeRegistration(127.0.0.1:34015, datanodeUuid=8c1004f4-e3d1-4952-a8d7-d0dd258e89b0, infoPort=44367, infoSecurePort=0, ipcPort=39635, storageInfo=lv=-57;cid=testClusterID;nsid=294757765;c=1732474608924) 2024-11-24T18:56:51,761 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x22c16f07c22cdad1 with lease ID 0x1c9a7ccfd6ab58b: from storage DS-a12b1ca8-bf6a-458b-8f77-38c6dd5c4804 node DatanodeRegistration(127.0.0.1:34015, datanodeUuid=8c1004f4-e3d1-4952-a8d7-d0dd258e89b0, infoPort=44367, infoSecurePort=0, ipcPort=39635, storageInfo=lv=-57;cid=testClusterID;nsid=294757765;c=1732474608924), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:56:51,761 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x22c16f07c22cdad1 with lease ID 0x1c9a7ccfd6ab58b: Processing first storage report for DS-6b3cb893-3c3a-4121-b5ce-16595197feaa from datanode DatanodeRegistration(127.0.0.1:34015, datanodeUuid=8c1004f4-e3d1-4952-a8d7-d0dd258e89b0, infoPort=44367, infoSecurePort=0, ipcPort=39635, storageInfo=lv=-57;cid=testClusterID;nsid=294757765;c=1732474608924) 2024-11-24T18:56:51,761 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x22c16f07c22cdad1 with lease ID 0x1c9a7ccfd6ab58b: from storage DS-6b3cb893-3c3a-4121-b5ce-16595197feaa node DatanodeRegistration(127.0.0.1:34015, datanodeUuid=8c1004f4-e3d1-4952-a8d7-d0dd258e89b0, infoPort=44367, infoSecurePort=0, ipcPort=39635, storageInfo=lv=-57;cid=testClusterID;nsid=294757765;c=1732474608924), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T18:56:51,848 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67 2024-11-24T18:56:51,854 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/cluster_182f2636-85e1-4f57-e859-03b49e3286cf/zookeeper_0, clientPort=59046, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/cluster_182f2636-85e1-4f57-e859-03b49e3286cf/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/cluster_182f2636-85e1-4f57-e859-03b49e3286cf/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T18:56:51,855 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59046 2024-11-24T18:56:51,855 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:56:51,857 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:56:51,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741825_1001 (size=7) 2024-11-24T18:56:51,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34015 is added to blk_1073741825_1001 (size=7) 2024-11-24T18:56:51,866 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197 with version=8 2024-11-24T18:56:51,866 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40437/user/jenkins/test-data/91bb223a-a964-b097-0799-aa34536414f2/hbase-staging 2024-11-24T18:56:51,868 INFO [Time-limited test {}] client.ConnectionUtils(128): master/f2b92657890a:0 server-side Connection retries=45 2024-11-24T18:56:51,868 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:56:51,868 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T18:56:51,869 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T18:56:51,869 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:56:51,869 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T18:56:51,869 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T18:56:51,869 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T18:56:51,869 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37509 2024-11-24T18:56:51,870 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37509 connecting to ZooKeeper ensemble=127.0.0.1:59046 2024-11-24T18:56:51,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:375090x0, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T18:56:51,940 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37509-0x1016e34a4370000 connected 2024-11-24T18:56:52,019 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:56:52,021 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:56:52,024 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T18:56:52,024 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197, hbase.cluster.distributed=false 2024-11-24T18:56:52,026 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T18:56:52,026 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37509 2024-11-24T18:56:52,026 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37509 2024-11-24T18:56:52,027 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37509 2024-11-24T18:56:52,027 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37509 2024-11-24T18:56:52,027 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37509 2024-11-24T18:56:52,046 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/f2b92657890a:0 server-side Connection retries=45 2024-11-24T18:56:52,046 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:56:52,046 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T18:56:52,046 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T18:56:52,046 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T18:56:52,046 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T18:56:52,046 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T18:56:52,046 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T18:56:52,047 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32805 2024-11-24T18:56:52,048 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:32805 connecting to ZooKeeper ensemble=127.0.0.1:59046 2024-11-24T18:56:52,049 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:56:52,050 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:56:52,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:328050x0, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T18:56:52,061 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:328050x0, quorum=127.0.0.1:59046, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T18:56:52,061 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:32805-0x1016e34a4370001 connected 2024-11-24T18:56:52,062 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T18:56:52,062 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T18:56:52,063 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32805-0x1016e34a4370001, quorum=127.0.0.1:59046, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T18:56:52,064 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32805-0x1016e34a4370001, quorum=127.0.0.1:59046, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T18:56:52,064 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32805 2024-11-24T18:56:52,064 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32805 2024-11-24T18:56:52,068 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32805 2024-11-24T18:56:52,069 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32805 2024-11-24T18:56:52,069 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32805 2024-11-24T18:56:52,082 DEBUG [M:0;f2b92657890a:37509 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;f2b92657890a:37509 2024-11-24T18:56:52,082 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/f2b92657890a,37509,1732474611868 2024-11-24T18:56:52,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T18:56:52,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32805-0x1016e34a4370001, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T18:56:52,093 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/f2b92657890a,37509,1732474611868 2024-11-24T18:56:52,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:56:52,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32805-0x1016e34a4370001, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T18:56:52,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32805-0x1016e34a4370001, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:56:52,104 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T18:56:52,104 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/f2b92657890a,37509,1732474611868 from backup master directory 2024-11-24T18:56:52,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32805-0x1016e34a4370001, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T18:56:52,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/f2b92657890a,37509,1732474611868 2024-11-24T18:56:52,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T18:56:52,114 WARN [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
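The ZKWatcher traffic above (NodeCreated, NodeDeleted, NodeChildrenChanged on the /hbase znodes) can be observed with the plain Apache ZooKeeper client as well. A minimal sketch follows, assuming only a quorum reachable at the connect string from the log; the watched path and timeout are illustrative, and this is not HBase's internal ZKWatcher code:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZnodeWatchExample {
      public static void main(String[] args) throws Exception {
        // Connect string copied from the log; any reachable ZooKeeper quorum works.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:59046", 30000, new Watcher() {
          @Override
          public void process(WatchedEvent event) {
            // NodeCreated / NodeDeleted / NodeChildrenChanged events arrive here,
            // the same event types the ZKWatcher lines above report.
            System.out.println("type=" + event.getType() + ", path=" + event.getPath());
          }
        });
        // exists() with watch=true registers a watcher even when the znode is absent,
        // which matches the "Set watcher on znode that does not yet exist" entries.
        zk.exists("/hbase/master", true);
        Thread.sleep(10_000); // keep the session open long enough to see events
        zk.close();
      }
    }
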
2024-11-24T18:56:52,114 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=f2b92657890a,37509,1732474611868 2024-11-24T18:56:52,117 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/hbase.id] with ID: 51e73470-1b9f-43a3-b954-e56d0cd7cda4 2024-11-24T18:56:52,117 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/.tmp/hbase.id 2024-11-24T18:56:52,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741826_1002 (size=42) 2024-11-24T18:56:52,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34015 is added to blk_1073741826_1002 (size=42) 2024-11-24T18:56:52,122 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/.tmp/hbase.id]:[hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/hbase.id] 2024-11-24T18:56:52,133 INFO [master/f2b92657890a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:56:52,133 INFO [master/f2b92657890a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T18:56:52,134 INFO [master/f2b92657890a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
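The cluster ID bootstrap above writes hbase.id to a temporary path and then moves it into place. A minimal sketch of the same write-then-rename pattern with the public Hadoop FileSystem API, assuming the NameNode address from the log; the directory and the UUID literal are only placeholders taken from this run:

    import java.net.URI;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdWriteExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode address copied from the log; the paths below are placeholders.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:39241"), conf);
        Path tmp = new Path("/user/jenkins/example/.tmp/hbase.id");
        Path target = new Path("/user/jenkins/example/hbase.id");
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("51e73470-1b9f-43a3-b954-e56d0cd7cda4".getBytes(StandardCharsets.UTF_8));
        }
        // Renaming the finished temporary file makes the id appear in one step at
        // its final location, which is what the FSUtils entries above describe.
        if (!fs.rename(tmp, target)) {
          throw new IllegalStateException("rename failed");
        }
        fs.close();
      }
    }
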
2024-11-24T18:56:52,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32805-0x1016e34a4370001, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:56:52,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:56:52,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741827_1003 (size=196) 2024-11-24T18:56:52,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34015 is added to blk_1073741827_1003 (size=196) 2024-11-24T18:56:52,153 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T18:56:52,154 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T18:56:52,154 INFO [master/f2b92657890a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T18:56:52,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741828_1004 (size=1189) 2024-11-24T18:56:52,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34015 is added to blk_1073741828_1004 (size=1189) 2024-11-24T18:56:52,161 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/MasterData/data/master/store 2024-11-24T18:56:52,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741829_1005 (size=34) 2024-11-24T18:56:52,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34015 is added to blk_1073741829_1005 (size=34) 2024-11-24T18:56:52,168 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:56:52,168 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T18:56:52,168 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:56:52,168 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:56:52,168 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T18:56:52,168 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:56:52,168 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
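The master:store descriptor printed above (families info, proc, rs and state, each with its own block size, bloom filter and encoding settings) maps onto HBase's public descriptor builder API. A minimal sketch, assuming only hbase-client on the classpath; it reproduces the 'info' family options from the log with an illustrative table name and is not the internal MasterRegion bootstrap code:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorExample {
      public static void main(String[] args) {
        // Mirrors the 'info' family from the log: VERSIONS => 3, IN_MEMORY => true,
        // BLOCKSIZE => 8192, BLOOMFILTER => ROWCOL, DATA_BLOCK_ENCODING => ROW_INDEX_V1.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .build())
            // proc, rs and state would be added the same way with their own options.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
            .build();
        System.out.println(td);
      }
    }
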
2024-11-24T18:56:52,168 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732474612168Disabling compacts and flushes for region at 1732474612168Disabling writes for close at 1732474612168Writing region close event to WAL at 1732474612168Closed at 1732474612168 2024-11-24T18:56:52,169 WARN [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/MasterData/data/master/store/.initializing 2024-11-24T18:56:52,169 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/MasterData/WALs/f2b92657890a,37509,1732474611868 2024-11-24T18:56:52,171 INFO [master/f2b92657890a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f2b92657890a%2C37509%2C1732474611868, suffix=, logDir=hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/MasterData/WALs/f2b92657890a,37509,1732474611868, archiveDir=hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/MasterData/oldWALs, maxLogs=10 2024-11-24T18:56:52,171 INFO [master/f2b92657890a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C37509%2C1732474611868.1732474612171 2024-11-24T18:56:52,176 INFO [master/f2b92657890a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/MasterData/WALs/f2b92657890a,37509,1732474611868/f2b92657890a%2C37509%2C1732474611868.1732474612171 2024-11-24T18:56:52,177 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44367:44367),(127.0.0.1/127.0.0.1:45991:45991)] 2024-11-24T18:56:52,178 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T18:56:52,178 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:56:52,178 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:56:52,178 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:56:52,179 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:56:52,181 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T18:56:52,181 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:56:52,181 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:56:52,181 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:56:52,182 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T18:56:52,183 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:56:52,183 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T18:56:52,183 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:56:52,185 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T18:56:52,185 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:56:52,185 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T18:56:52,185 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:56:52,187 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T18:56:52,187 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:56:52,187 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T18:56:52,187 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:56:52,188 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:56:52,189 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:56:52,190 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:56:52,190 DEBUG [master/f2b92657890a:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:56:52,190 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T18:56:52,192 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T18:56:52,194 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T18:56:52,194 INFO [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=828598, jitterRate=0.053617268800735474}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T18:56:52,195 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732474612178Initializing all the Stores at 1732474612179 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474612179Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474612179Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474612179Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474612179Cleaning up temporary data from old regions at 1732474612190 (+11 ms)Region opened successfully at 1732474612195 (+5 ms) 2024-11-24T18:56:52,195 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T18:56:52,197 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46f9a3ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f2b92657890a/172.17.0.2:0 2024-11-24T18:56:52,198 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T18:56:52,198 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T18:56:52,198 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T18:56:52,199 INFO [master/f2b92657890a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T18:56:52,199 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T18:56:52,199 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T18:56:52,199 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T18:56:52,201 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T18:56:52,202 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T18:56:52,208 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T18:56:52,209 INFO [master/f2b92657890a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T18:56:52,209 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T18:56:52,219 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T18:56:52,219 INFO [master/f2b92657890a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T18:56:52,220 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T18:56:52,229 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T18:56:52,230 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T18:56:52,240 DEBUG 
[master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T18:56:52,243 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T18:56:52,250 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T18:56:52,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T18:56:52,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32805-0x1016e34a4370001, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T18:56:52,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:56:52,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32805-0x1016e34a4370001, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:56:52,262 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=f2b92657890a,37509,1732474611868, sessionid=0x1016e34a4370000, setting cluster-up flag (Was=false) 2024-11-24T18:56:52,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:56:52,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32805-0x1016e34a4370001, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:56:52,314 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T18:56:52,315 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f2b92657890a,37509,1732474611868 2024-11-24T18:56:52,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:56:52,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32805-0x1016e34a4370001, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:56:52,366 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T18:56:52,368 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f2b92657890a,37509,1732474611868 2024-11-24T18:56:52,370 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T18:56:52,372 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T18:56:52,372 INFO [master/f2b92657890a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T18:56:52,373 INFO [master/f2b92657890a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T18:56:52,373 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: f2b92657890a,37509,1732474611868 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T18:56:52,375 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/f2b92657890a:0, corePoolSize=5, maxPoolSize=5 2024-11-24T18:56:52,375 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/f2b92657890a:0, corePoolSize=5, maxPoolSize=5 2024-11-24T18:56:52,375 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/f2b92657890a:0, corePoolSize=5, maxPoolSize=5 2024-11-24T18:56:52,375 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/f2b92657890a:0, corePoolSize=5, maxPoolSize=5 2024-11-24T18:56:52,375 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/f2b92657890a:0, corePoolSize=10, maxPoolSize=10 2024-11-24T18:56:52,375 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:56:52,375 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/f2b92657890a:0, corePoolSize=2, maxPoolSize=2 2024-11-24T18:56:52,375 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/f2b92657890a:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T18:56:52,376 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732474642376 2024-11-24T18:56:52,376 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T18:56:52,376 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T18:56:52,376 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T18:56:52,376 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T18:56:52,377 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T18:56:52,377 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T18:56:52,377 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T18:56:52,377 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T18:56:52,377 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T18:56:52,377 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T18:56:52,377 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T18:56:52,378 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T18:56:52,378 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T18:56:52,378 INFO [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T18:56:52,378 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.large.0-1732474612378,5,FailOnTimeoutGroup] 2024-11-24T18:56:52,379 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:56:52,379 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.small.0-1732474612378,5,FailOnTimeoutGroup] 2024-11-24T18:56:52,379 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
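The chores registered above (LogsCleaner, HFileCleaner, and later ReplicationBarrierCleaner and SnapshotCleaner) run on HBase's internal ChoreService at a fixed period, for example period=600000 ms. The scheduling idea, reduced to a JDK-only sketch with an illustrative task body rather than the real cleaner logic:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class PeriodicChoreExample {
      public static void main(String[] args) {
        // Fixed-period scheduling, matching the period=600000 ms chores logged above.
        ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
        Runnable chore = () -> System.out.println("cleaner tick: scan dirs, delete expired files");
        pool.scheduleAtFixedRate(chore, 0, 600_000, TimeUnit.MILLISECONDS);
        // pool.shutdown() would stop the periodic task; omitted so the example keeps running.
      }
    }
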
2024-11-24T18:56:52,379 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T18:56:52,379 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T18:56:52,379 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-24T18:56:52,379 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T18:56:52,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34015 is added to blk_1073741831_1007 (size=1321) 2024-11-24T18:56:52,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741831_1007 (size=1321) 2024-11-24T18:56:52,389 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T18:56:52,389 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197 2024-11-24T18:56:52,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741832_1008 (size=32) 2024-11-24T18:56:52,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34015 is added to blk_1073741832_1008 (size=32) 2024-11-24T18:56:52,401 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:56:52,402 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T18:56:52,403 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T18:56:52,403 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:56:52,404 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:56:52,404 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 
1588230740 2024-11-24T18:56:52,405 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T18:56:52,405 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:56:52,405 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:56:52,405 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T18:56:52,406 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T18:56:52,406 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:56:52,407 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:56:52,407 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T18:56:52,408 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T18:56:52,408 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:56:52,408 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:56:52,408 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T18:56:52,409 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/data/hbase/meta/1588230740 2024-11-24T18:56:52,409 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/data/hbase/meta/1588230740 2024-11-24T18:56:52,410 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T18:56:52,410 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T18:56:52,411 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
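The FlushLargeStoresPolicy entries for master:store (32.0 M, earlier) and hbase:meta (16.0 M, above) follow from the fallback the log describes: with hbase.hregion.percolumnfamilyflush.size.lower.bound unset, the lower bound is the region's memstore flush heap size divided by the number of families. A short worked check; the 128 MB and 64 MB flush sizes are inferred from those per-family figures rather than printed in the log:

    public class FlushLowerBoundExample {
      public static void main(String[] args) {
        // flushSizeLowerBound = memstore flush heap size / number of column families
        long masterStoreFlushSize = 128L * 1024 * 1024; // implied by "32.0 M" for 4 families
        long metaFlushSize        =  64L * 1024 * 1024; // implied by "16.0 M" for 4 families
        System.out.println(masterStoreFlushSize / 4);   // 33554432, as logged for master:store
        System.out.println(metaFlushSize / 4);          // 16777216, as logged for hbase:meta
      }
    }
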
2024-11-24T18:56:52,412 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T18:56:52,414 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T18:56:52,414 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=764545, jitterRate=-0.027831047773361206}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T18:56:52,415 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732474612401Initializing all the Stores at 1732474612401Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474612402 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474612402Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474612402Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474612402Cleaning up temporary data from old regions at 1732474612410 (+8 ms)Region opened successfully at 1732474612414 (+4 ms) 2024-11-24T18:56:52,415 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T18:56:52,415 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T18:56:52,415 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T18:56:52,415 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T18:56:52,415 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T18:56:52,415 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T18:56:52,415 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732474612415Disabling compacts and flushes for region at 1732474612415Disabling writes for close at 1732474612415Writing region 
close event to WAL at 1732474612415Closed at 1732474612415 2024-11-24T18:56:52,416 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T18:56:52,416 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T18:56:52,416 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T18:56:52,417 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T18:56:52,418 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T18:56:52,470 INFO [RS:0;f2b92657890a:32805 {}] regionserver.HRegionServer(746): ClusterId : 51e73470-1b9f-43a3-b954-e56d0cd7cda4 2024-11-24T18:56:52,471 DEBUG [RS:0;f2b92657890a:32805 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T18:56:52,483 DEBUG [RS:0;f2b92657890a:32805 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T18:56:52,483 DEBUG [RS:0;f2b92657890a:32805 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T18:56:52,493 DEBUG [RS:0;f2b92657890a:32805 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T18:56:52,494 DEBUG [RS:0;f2b92657890a:32805 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47f8d975, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f2b92657890a/172.17.0.2:0 2024-11-24T18:56:52,505 DEBUG [RS:0;f2b92657890a:32805 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;f2b92657890a:32805 2024-11-24T18:56:52,505 INFO [RS:0;f2b92657890a:32805 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T18:56:52,505 INFO [RS:0;f2b92657890a:32805 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T18:56:52,505 DEBUG [RS:0;f2b92657890a:32805 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-24T18:56:52,506 INFO [RS:0;f2b92657890a:32805 {}] regionserver.HRegionServer(2659): reportForDuty to master=f2b92657890a,37509,1732474611868 with port=32805, startcode=1732474612045 2024-11-24T18:56:52,506 DEBUG [RS:0;f2b92657890a:32805 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T18:56:52,508 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47285, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T18:56:52,509 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37509 {}] master.ServerManager(363): Checking decommissioned status of RegionServer f2b92657890a,32805,1732474612045 2024-11-24T18:56:52,509 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37509 {}] master.ServerManager(517): Registering regionserver=f2b92657890a,32805,1732474612045 2024-11-24T18:56:52,510 DEBUG [RS:0;f2b92657890a:32805 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197 2024-11-24T18:56:52,510 DEBUG [RS:0;f2b92657890a:32805 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39241 2024-11-24T18:56:52,510 DEBUG [RS:0;f2b92657890a:32805 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T18:56:52,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T18:56:52,521 DEBUG [RS:0;f2b92657890a:32805 {}] zookeeper.ZKUtil(111): regionserver:32805-0x1016e34a4370001, quorum=127.0.0.1:59046, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/f2b92657890a,32805,1732474612045 2024-11-24T18:56:52,521 WARN [RS:0;f2b92657890a:32805 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T18:56:52,521 INFO [RS:0;f2b92657890a:32805 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T18:56:52,521 DEBUG [RS:0;f2b92657890a:32805 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/WALs/f2b92657890a,32805,1732474612045 2024-11-24T18:56:52,522 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [f2b92657890a,32805,1732474612045] 2024-11-24T18:56:52,525 INFO [RS:0;f2b92657890a:32805 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T18:56:52,525 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:52,528 INFO [RS:0;f2b92657890a:32805 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T18:56:52,529 INFO [RS:0;f2b92657890a:32805 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T18:56:52,529 INFO [RS:0;f2b92657890a:32805 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T18:56:52,548 INFO [RS:0;f2b92657890a:32805 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T18:56:52,549 INFO [RS:0;f2b92657890a:32805 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T18:56:52,549 INFO [RS:0;f2b92657890a:32805 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-24T18:56:52,549 DEBUG [RS:0;f2b92657890a:32805 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:56:52,549 DEBUG [RS:0;f2b92657890a:32805 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:56:52,549 DEBUG [RS:0;f2b92657890a:32805 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:56:52,549 DEBUG [RS:0;f2b92657890a:32805 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:56:52,549 DEBUG [RS:0;f2b92657890a:32805 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:56:52,549 DEBUG [RS:0;f2b92657890a:32805 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/f2b92657890a:0, corePoolSize=2, maxPoolSize=2 2024-11-24T18:56:52,549 DEBUG [RS:0;f2b92657890a:32805 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:56:52,549 DEBUG [RS:0;f2b92657890a:32805 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:56:52,550 DEBUG [RS:0;f2b92657890a:32805 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:56:52,550 DEBUG [RS:0;f2b92657890a:32805 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:56:52,550 DEBUG [RS:0;f2b92657890a:32805 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:56:52,550 DEBUG [RS:0;f2b92657890a:32805 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/f2b92657890a:0, corePoolSize=1, maxPoolSize=1 2024-11-24T18:56:52,550 DEBUG [RS:0;f2b92657890a:32805 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/f2b92657890a:0, corePoolSize=3, maxPoolSize=3 2024-11-24T18:56:52,550 DEBUG [RS:0;f2b92657890a:32805 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/f2b92657890a:0, corePoolSize=3, maxPoolSize=3 2024-11-24T18:56:52,550 INFO [RS:0;f2b92657890a:32805 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T18:56:52,550 INFO [RS:0;f2b92657890a:32805 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T18:56:52,550 INFO [RS:0;f2b92657890a:32805 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T18:56:52,550 INFO [RS:0;f2b92657890a:32805 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-24T18:56:52,550 INFO [RS:0;f2b92657890a:32805 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T18:56:52,550 INFO [RS:0;f2b92657890a:32805 {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,32805,1732474612045-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T18:56:52,560 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:52,568 INFO [RS:0;f2b92657890a:32805 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T18:56:52,568 INFO [RS:0;f2b92657890a:32805 {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,32805,1732474612045-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T18:56:52,568 INFO [RS:0;f2b92657890a:32805 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:56:52,568 INFO [RS:0;f2b92657890a:32805 {}] regionserver.Replication(171): f2b92657890a,32805,1732474612045 started 2024-11-24T18:56:52,568 WARN [f2b92657890a:37509 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-24T18:56:52,581 INFO [RS:0;f2b92657890a:32805 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:56:52,582 INFO [RS:0;f2b92657890a:32805 {}] regionserver.HRegionServer(1482): Serving as f2b92657890a,32805,1732474612045, RpcServer on f2b92657890a/172.17.0.2:32805, sessionid=0x1016e34a4370001 2024-11-24T18:56:52,582 DEBUG [RS:0;f2b92657890a:32805 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T18:56:52,582 DEBUG [RS:0;f2b92657890a:32805 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager f2b92657890a,32805,1732474612045 2024-11-24T18:56:52,582 DEBUG [RS:0;f2b92657890a:32805 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f2b92657890a,32805,1732474612045' 2024-11-24T18:56:52,582 DEBUG [RS:0;f2b92657890a:32805 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T18:56:52,582 DEBUG [RS:0;f2b92657890a:32805 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T18:56:52,583 DEBUG [RS:0;f2b92657890a:32805 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T18:56:52,583 DEBUG [RS:0;f2b92657890a:32805 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T18:56:52,583 DEBUG [RS:0;f2b92657890a:32805 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager f2b92657890a,32805,1732474612045 2024-11-24T18:56:52,583 DEBUG [RS:0;f2b92657890a:32805 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f2b92657890a,32805,1732474612045' 2024-11-24T18:56:52,583 DEBUG [RS:0;f2b92657890a:32805 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T18:56:52,583 DEBUG [RS:0;f2b92657890a:32805 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T18:56:52,583 DEBUG [RS:0;f2b92657890a:32805 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T18:56:52,583 INFO [RS:0;f2b92657890a:32805 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T18:56:52,583 INFO [RS:0;f2b92657890a:32805 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-24T18:56:52,685 INFO [RS:0;f2b92657890a:32805 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f2b92657890a%2C32805%2C1732474612045, suffix=, logDir=hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/WALs/f2b92657890a,32805,1732474612045, archiveDir=hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/oldWALs, maxLogs=32 2024-11-24T18:56:52,686 INFO [RS:0;f2b92657890a:32805 {}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C32805%2C1732474612045.1732474612685 2024-11-24T18:56:52,690 INFO [RS:0;f2b92657890a:32805 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/WALs/f2b92657890a,32805,1732474612045/f2b92657890a%2C32805%2C1732474612045.1732474612685 2024-11-24T18:56:52,691 DEBUG [RS:0;f2b92657890a:32805 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45991:45991),(127.0.0.1/127.0.0.1:44367:44367)] 2024-11-24T18:56:52,819 DEBUG [f2b92657890a:37509 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T18:56:52,819 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=f2b92657890a,32805,1732474612045 2024-11-24T18:56:52,820 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f2b92657890a,32805,1732474612045, state=OPENING 2024-11-24T18:56:52,829 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T18:56:52,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:56:52,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32805-0x1016e34a4370001, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:56:52,840 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T18:56:52,841 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T18:56:52,841 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T18:56:52,841 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=f2b92657890a,32805,1732474612045}] 2024-11-24T18:56:52,861 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:52,861 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:52,861 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:52,861 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:52,861 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:52,862 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:52,862 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:52,862 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:52,863 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:52,863 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:52,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:52,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:52,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:52,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:52,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:52,892 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:52,896 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:52,896 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:52,896 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:52,898 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:52,994 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T18:56:52,996 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54545, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T18:56:52,999 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T18:56:52,999 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T18:56:53,001 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f2b92657890a%2C32805%2C1732474612045.meta, suffix=.meta, logDir=hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/WALs/f2b92657890a,32805,1732474612045, archiveDir=hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/oldWALs, maxLogs=32 2024-11-24T18:56:53,001 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor f2b92657890a%2C32805%2C1732474612045.meta.1732474613001.meta 2024-11-24T18:56:53,008 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/WALs/f2b92657890a,32805,1732474612045/f2b92657890a%2C32805%2C1732474612045.meta.1732474613001.meta 2024-11-24T18:56:53,013 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45991:45991),(127.0.0.1/127.0.0.1:44367:44367)] 2024-11-24T18:56:53,016 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T18:56:53,017 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T18:56:53,017 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T18:56:53,017 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-24T18:56:53,017 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T18:56:53,017 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T18:56:53,017 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T18:56:53,017 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T18:56:53,019 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T18:56:53,020 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T18:56:53,020 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:56:53,020 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:56:53,020 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T18:56:53,021 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T18:56:53,021 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:56:53,021 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:56:53,021 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T18:56:53,022 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T18:56:53,022 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:56:53,022 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T18:56:53,022 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T18:56:53,023 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T18:56:53,023 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T18:56:53,024 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-24T18:56:53,024 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T18:56:53,024 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/data/hbase/meta/1588230740 2024-11-24T18:56:53,025 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/data/hbase/meta/1588230740 2024-11-24T18:56:53,028 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T18:56:53,028 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T18:56:53,029 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T18:56:53,030 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T18:56:53,031 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=762831, jitterRate=-0.030010908842086792}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T18:56:53,031 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T18:56:53,031 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732474613018Writing region info on filesystem at 1732474613018Initializing all the Stores at 1732474613018Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474613018Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474613019 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732474613019Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732474613019Cleaning up temporary data from old regions at 1732474613028 (+9 ms)Running coprocessor post-open hooks at 1732474613031 (+3 ms)Region opened successfully at 1732474613031 2024-11-24T18:56:53,032 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732474612993 2024-11-24T18:56:53,034 DEBUG [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T18:56:53,034 INFO [RS_OPEN_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T18:56:53,041 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=f2b92657890a,32805,1732474612045 2024-11-24T18:56:53,042 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f2b92657890a,32805,1732474612045, state=OPEN 2024-11-24T18:56:53,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32805-0x1016e34a4370001, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T18:56:53,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T18:56:53,076 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=f2b92657890a,32805,1732474612045 2024-11-24T18:56:53,077 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T18:56:53,077 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T18:56:53,079 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T18:56:53,080 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=f2b92657890a,32805,1732474612045 in 236 msec 2024-11-24T18:56:53,082 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T18:56:53,082 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 663 msec 2024-11-24T18:56:53,082 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T18:56:53,082 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T18:56:53,084 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T18:56:53,084 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f2b92657890a,32805,1732474612045, seqNum=-1] 2024-11-24T18:56:53,084 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T18:56:53,086 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52933, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T18:56:53,091 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 719 msec 2024-11-24T18:56:53,091 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732474613091, completionTime=-1 2024-11-24T18:56:53,091 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T18:56:53,091 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T18:56:53,095 INFO [master/f2b92657890a:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T18:56:53,095 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732474673095 2024-11-24T18:56:53,095 INFO [master/f2b92657890a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732474733095 2024-11-24T18:56:53,095 INFO [master/f2b92657890a:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 4 msec 2024-11-24T18:56:53,096 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,37509,1732474611868-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T18:56:53,096 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,37509,1732474611868-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:56:53,096 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,37509,1732474611868-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T18:56:53,096 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-f2b92657890a:37509, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T18:56:53,096 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T18:56:53,096 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T18:56:53,098 DEBUG [master/f2b92657890a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T18:56:53,100 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.986sec 2024-11-24T18:56:53,100 INFO [master/f2b92657890a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T18:56:53,100 INFO [master/f2b92657890a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T18:56:53,100 INFO [master/f2b92657890a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T18:56:53,100 INFO [master/f2b92657890a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T18:56:53,100 INFO [master/f2b92657890a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T18:56:53,100 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,37509,1732474611868-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T18:56:53,100 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,37509,1732474611868-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T18:56:53,103 DEBUG [master/f2b92657890a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T18:56:53,103 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T18:56:53,103 INFO [master/f2b92657890a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f2b92657890a,37509,1732474611868-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T18:56:53,171 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41fb0348, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T18:56:53,171 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request f2b92657890a,37509,-1 for getting cluster id 2024-11-24T18:56:53,171 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T18:56:53,172 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '51e73470-1b9f-43a3-b954-e56d0cd7cda4' 2024-11-24T18:56:53,173 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T18:56:53,173 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "51e73470-1b9f-43a3-b954-e56d0cd7cda4" 2024-11-24T18:56:53,173 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a276ca4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T18:56:53,173 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f2b92657890a,37509,-1] 2024-11-24T18:56:53,173 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T18:56:53,174 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:56:53,175 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38672, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T18:56:53,176 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@afbbfe3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T18:56:53,176 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T18:56:53,177 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f2b92657890a,32805,1732474612045, seqNum=-1] 2024-11-24T18:56:53,177 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T18:56:53,178 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35568, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T18:56:53,180 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=f2b92657890a,37509,1732474611868 2024-11-24T18:56:53,180 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T18:56:53,183 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T18:56:53,183 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T18:56:53,185 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/WALs/test.com,8080,1, archiveDir=hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/oldWALs, maxLogs=32 2024-11-24T18:56:53,185 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732474613185 2024-11-24T18:56:53,190 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/WALs/test.com,8080,1/test.com%2C8080%2C1.1732474613185 2024-11-24T18:56:53,192 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44367:44367),(127.0.0.1/127.0.0.1:45991:45991)] 2024-11-24T18:56:53,204 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732474613204 2024-11-24T18:56:53,210 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:53,210 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:53,211 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:53,211 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:53,211 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:53,211 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/WALs/test.com,8080,1/test.com%2C8080%2C1.1732474613185 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/WALs/test.com,8080,1/test.com%2C8080%2C1.1732474613204 2024-11-24T18:56:53,213 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44367:44367),(127.0.0.1/127.0.0.1:45991:45991)] 2024-11-24T18:56:53,213 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/WALs/test.com,8080,1/test.com%2C8080%2C1.1732474613185 is not closed yet, will try archiving it next time 2024-11-24T18:56:53,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741835_1011 (size=93) 2024-11-24T18:56:53,214 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:53,214 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:53,214 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:53,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34015 is added to blk_1073741835_1011 (size=93) 2024-11-24T18:56:53,214 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:53,215 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:53,216 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/WALs/test.com,8080,1/test.com%2C8080%2C1.1732474613185 to hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/oldWALs/test.com%2C8080%2C1.1732474613185 2024-11-24T18:56:53,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741836_1012 (size=93) 2024-11-24T18:56:53,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34015 is added to blk_1073741836_1012 (size=93) 2024-11-24T18:56:53,219 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/oldWALs 2024-11-24T18:56:53,219 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1732474613204) 2024-11-24T18:56:53,219 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T18:56:53,219 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T18:56:53,219 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T18:56:53,219 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:56:53,219 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:56:53,219 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-24T18:56:53,220 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T18:56:53,220 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1411478328, stopped=false 2024-11-24T18:56:53,220 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=f2b92657890a,37509,1732474611868 2024-11-24T18:56:53,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32805-0x1016e34a4370001, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T18:56:53,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T18:56:53,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32805-0x1016e34a4370001, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:56:53,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:56:53,240 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T18:56:53,240 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-24T18:56:53,240 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T18:56:53,240 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:56:53,241 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:32805-0x1016e34a4370001, quorum=127.0.0.1:59046, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T18:56:53,241 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'f2b92657890a,32805,1732474612045' ***** 2024-11-24T18:56:53,241 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T18:56:53,241 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T18:56:53,241 INFO [RS:0;f2b92657890a:32805 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T18:56:53,241 INFO [RS:0;f2b92657890a:32805 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T18:56:53,241 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T18:56:53,241 INFO [RS:0;f2b92657890a:32805 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T18:56:53,241 INFO [RS:0;f2b92657890a:32805 {}] regionserver.HRegionServer(959): stopping server f2b92657890a,32805,1732474612045 2024-11-24T18:56:53,241 INFO [RS:0;f2b92657890a:32805 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T18:56:53,241 INFO [RS:0;f2b92657890a:32805 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;f2b92657890a:32805. 2024-11-24T18:56:53,241 DEBUG [RS:0;f2b92657890a:32805 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T18:56:53,242 DEBUG [RS:0;f2b92657890a:32805 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:56:53,242 INFO [RS:0;f2b92657890a:32805 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T18:56:53,242 INFO [RS:0;f2b92657890a:32805 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T18:56:53,242 INFO [RS:0;f2b92657890a:32805 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-24T18:56:53,242 INFO [RS:0;f2b92657890a:32805 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T18:56:53,242 INFO [RS:0;f2b92657890a:32805 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-24T18:56:53,242 DEBUG [RS:0;f2b92657890a:32805 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-24T18:56:53,242 DEBUG [RS:0;f2b92657890a:32805 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-24T18:56:53,242 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T18:56:53,242 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T18:56:53,242 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T18:56:53,242 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T18:56:53,242 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T18:56:53,242 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-24T18:56:53,259 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/data/hbase/meta/1588230740/.tmp/ns/2eb6099cee0e4f81b7fb314418649608 is 43, key is default/ns:d/1732474613086/Put/seqid=0 2024-11-24T18:56:53,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34015 is added to blk_1073741837_1013 (size=5153) 2024-11-24T18:56:53,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741837_1013 (size=5153) 2024-11-24T18:56:53,263 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/data/hbase/meta/1588230740/.tmp/ns/2eb6099cee0e4f81b7fb314418649608 2024-11-24T18:56:53,270 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/data/hbase/meta/1588230740/.tmp/ns/2eb6099cee0e4f81b7fb314418649608 as hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/data/hbase/meta/1588230740/ns/2eb6099cee0e4f81b7fb314418649608 2024-11-24T18:56:53,275 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/data/hbase/meta/1588230740/ns/2eb6099cee0e4f81b7fb314418649608, entries=2, sequenceid=6, filesize=5.0 K 2024-11-24T18:56:53,277 INFO 
[RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 35ms, sequenceid=6, compaction requested=false 2024-11-24T18:56:53,281 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-24T18:56:53,282 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T18:56:53,282 INFO [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T18:56:53,282 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732474613242Running coprocessor pre-close hooks at 1732474613242Disabling compacts and flushes for region at 1732474613242Disabling writes for close at 1732474613242Obtaining lock to block concurrent updates at 1732474613242Preparing flush snapshotting stores in 1588230740 at 1732474613242Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732474613243 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732474613243Flushing 1588230740/ns: creating writer at 1732474613243Flushing 1588230740/ns: appending metadata at 1732474613258 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1732474613258Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@55d8e8fb: reopening flushed file at 1732474613269 (+11 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 35ms, sequenceid=6, compaction requested=false at 1732474613277 (+8 ms)Writing region close event to WAL at 1732474613278 (+1 ms)Running coprocessor post-close hooks at 1732474613282 (+4 ms)Closed at 1732474613282 2024-11-24T18:56:53,282 DEBUG [RS_CLOSE_META-regionserver/f2b92657890a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T18:56:53,407 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T18:56:53,407 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:53,408 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:53,408 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:53,408 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:53,408 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:53,409 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:53,410 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:53,410 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:53,410 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:53,410 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:53,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:53,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:53,436 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:53,436 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:53,436 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:53,436 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:53,440 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:53,440 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:53,441 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:53,442 INFO [RS:0;f2b92657890a:32805 {}] regionserver.HRegionServer(976): stopping server f2b92657890a,32805,1732474612045; all regions closed. 2024-11-24T18:56:53,443 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:53,443 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:53,443 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T18:56:53,443 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:53,443 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:53,443 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:53,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34015 is added to blk_1073741834_1010 (size=1152) 2024-11-24T18:56:53,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741834_1010 (size=1152) 2024-11-24T18:56:53,447 DEBUG [RS:0;f2b92657890a:32805 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/oldWALs 2024-11-24T18:56:53,447 INFO [RS:0;f2b92657890a:32805 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f2b92657890a%2C32805%2C1732474612045.meta:.meta(num 1732474613001) 2024-11-24T18:56:53,447 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:53,448 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:53,448 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:53,448 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:53,448 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:53,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34015 is added to blk_1073741833_1009 (size=93) 2024-11-24T18:56:53,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741833_1009 (size=93) 2024-11-24T18:56:53,451 DEBUG [RS:0;f2b92657890a:32805 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/oldWALs 2024-11-24T18:56:53,451 INFO [RS:0;f2b92657890a:32805 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f2b92657890a%2C32805%2C1732474612045:(num 1732474612685) 2024-11-24T18:56:53,451 DEBUG [RS:0;f2b92657890a:32805 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T18:56:53,451 INFO [RS:0;f2b92657890a:32805 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T18:56:53,451 INFO [RS:0;f2b92657890a:32805 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T18:56:53,451 INFO [RS:0;f2b92657890a:32805 {}] hbase.ChoreService(370): Chore service for: regionserver/f2b92657890a:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-24T18:56:53,452 INFO [RS:0;f2b92657890a:32805 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T18:56:53,452 INFO [regionserver/f2b92657890a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T18:56:53,452 INFO [RS:0;f2b92657890a:32805 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32805 2024-11-24T18:56:53,461 INFO [RS:0;f2b92657890a:32805 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T18:56:53,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T18:56:53,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32805-0x1016e34a4370001, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/f2b92657890a,32805,1732474612045 2024-11-24T18:56:53,471 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [f2b92657890a,32805,1732474612045] 2024-11-24T18:56:53,482 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/f2b92657890a,32805,1732474612045 already deleted, retry=false 2024-11-24T18:56:53,482 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; f2b92657890a,32805,1732474612045 expired; onlineServers=0 2024-11-24T18:56:53,482 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'f2b92657890a,37509,1732474611868' ***** 2024-11-24T18:56:53,482 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T18:56:53,482 INFO [M:0;f2b92657890a:37509 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T18:56:53,482 INFO [M:0;f2b92657890a:37509 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T18:56:53,482 DEBUG [M:0;f2b92657890a:37509 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T18:56:53,482 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T18:56:53,482 DEBUG [M:0;f2b92657890a:37509 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T18:56:53,482 DEBUG [master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.small.0-1732474612378 {}] cleaner.HFileCleaner(306): Exit Thread[master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.small.0-1732474612378,5,FailOnTimeoutGroup] 2024-11-24T18:56:53,482 DEBUG [master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.large.0-1732474612378 {}] cleaner.HFileCleaner(306): Exit Thread[master/f2b92657890a:0:becomeActiveMaster-HFileCleaner.large.0-1732474612378,5,FailOnTimeoutGroup] 2024-11-24T18:56:53,483 INFO [M:0;f2b92657890a:37509 {}] hbase.ChoreService(370): Chore service for: master/f2b92657890a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T18:56:53,483 INFO [M:0;f2b92657890a:37509 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T18:56:53,483 DEBUG [M:0;f2b92657890a:37509 {}] master.HMaster(1795): Stopping service threads 2024-11-24T18:56:53,483 INFO [M:0;f2b92657890a:37509 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T18:56:53,483 INFO [M:0;f2b92657890a:37509 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T18:56:53,483 INFO [M:0;f2b92657890a:37509 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T18:56:53,483 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T18:56:53,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T18:56:53,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T18:56:53,493 DEBUG [M:0;f2b92657890a:37509 {}] zookeeper.ZKUtil(347): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T18:56:53,493 WARN [M:0;f2b92657890a:37509 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T18:56:53,494 INFO [M:0;f2b92657890a:37509 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/.lastflushedseqids 2024-11-24T18:56:53,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741838_1014 (size=99) 2024-11-24T18:56:53,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34015 is added to blk_1073741838_1014 (size=99) 2024-11-24T18:56:53,510 INFO [M:0;f2b92657890a:37509 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T18:56:53,510 INFO [M:0;f2b92657890a:37509 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T18:56:53,510 DEBUG [M:0;f2b92657890a:37509 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T18:56:53,510 INFO [M:0;f2b92657890a:37509 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:56:53,510 DEBUG [M:0;f2b92657890a:37509 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:56:53,510 DEBUG [M:0;f2b92657890a:37509 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T18:56:53,510 DEBUG [M:0;f2b92657890a:37509 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:56:53,510 INFO [M:0;f2b92657890a:37509 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-24T18:56:53,526 DEBUG [M:0;f2b92657890a:37509 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f7abcaa3179d4dd7ace11054f331a65e is 82, key is hbase:meta,,1/info:regioninfo/1732474613040/Put/seqid=0 2024-11-24T18:56:53,526 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,43977,1732474387094/f2b92657890a%2C43977%2C1732474387094.1732474387326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:53,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741839_1015 (size=5672) 2024-11-24T18:56:53,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34015 is added to blk_1073741839_1015 (size=5672) 2024-11-24T18:56:53,530 INFO [M:0;f2b92657890a:37509 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f7abcaa3179d4dd7ace11054f331a65e 2024-11-24T18:56:53,552 DEBUG [M:0;f2b92657890a:37509 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/521df907c9b646cc9d24dd774702d946 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732474613090/Put/seqid=0 2024-11-24T18:56:53,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741840_1016 (size=5275) 2024-11-24T18:56:53,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34015 is added to blk_1073741840_1016 (size=5275) 2024-11-24T18:56:53,557 INFO [M:0;f2b92657890a:37509 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/521df907c9b646cc9d24dd774702d946 2024-11-24T18:56:53,560 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37713/user/jenkins/test-data/227d0ac9-bc87-0601-d61f-c65ff5ddda98/WALs/f2b92657890a,45405,1732474385930/f2b92657890a%2C45405%2C1732474385930.meta.1732474386931.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T18:56:53,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32805-0x1016e34a4370001, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T18:56:53,572 INFO [RS:0;f2b92657890a:32805 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T18:56:53,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32805-0x1016e34a4370001, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T18:56:53,572 INFO [RS:0;f2b92657890a:32805 {}] regionserver.HRegionServer(1031): Exiting; stopping=f2b92657890a,32805,1732474612045; zookeeper connection closed. 2024-11-24T18:56:53,572 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5fe708f4 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5fe708f4 2024-11-24T18:56:53,572 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T18:56:53,577 DEBUG [M:0;f2b92657890a:37509 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/59cd0644cfd14ce1bdb6d6ded8713440 is 69, key is f2b92657890a,32805,1732474612045/rs:state/1732474612509/Put/seqid=0 2024-11-24T18:56:53,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741841_1017 (size=5156) 2024-11-24T18:56:53,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34015 is added to blk_1073741841_1017 (size=5156) 2024-11-24T18:56:53,581 INFO [M:0;f2b92657890a:37509 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/59cd0644cfd14ce1bdb6d6ded8713440 2024-11-24T18:56:53,601 DEBUG [M:0;f2b92657890a:37509 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/18643e7fb06a411c936d4b4526bc705d is 52, key is load_balancer_on/state:d/1732474613182/Put/seqid=0 2024-11-24T18:56:53,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741842_1018 (size=5056) 2024-11-24T18:56:53,605 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34015 is added to blk_1073741842_1018 (size=5056) 2024-11-24T18:56:53,605 INFO [M:0;f2b92657890a:37509 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/18643e7fb06a411c936d4b4526bc705d 2024-11-24T18:56:53,611 DEBUG [M:0;f2b92657890a:37509 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f7abcaa3179d4dd7ace11054f331a65e as hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f7abcaa3179d4dd7ace11054f331a65e 2024-11-24T18:56:53,615 INFO [M:0;f2b92657890a:37509 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f7abcaa3179d4dd7ace11054f331a65e, entries=8, sequenceid=29, filesize=5.5 K 2024-11-24T18:56:53,617 DEBUG [M:0;f2b92657890a:37509 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/521df907c9b646cc9d24dd774702d946 as hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/521df907c9b646cc9d24dd774702d946 2024-11-24T18:56:53,621 INFO [M:0;f2b92657890a:37509 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/521df907c9b646cc9d24dd774702d946, entries=3, sequenceid=29, filesize=5.2 K 2024-11-24T18:56:53,622 DEBUG [M:0;f2b92657890a:37509 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/59cd0644cfd14ce1bdb6d6ded8713440 as hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/59cd0644cfd14ce1bdb6d6ded8713440 2024-11-24T18:56:53,627 INFO [M:0;f2b92657890a:37509 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/59cd0644cfd14ce1bdb6d6ded8713440, entries=1, sequenceid=29, filesize=5.0 K 2024-11-24T18:56:53,628 DEBUG [M:0;f2b92657890a:37509 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/18643e7fb06a411c936d4b4526bc705d as hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/18643e7fb06a411c936d4b4526bc705d 2024-11-24T18:56:53,633 INFO [M:0;f2b92657890a:37509 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39241/user/jenkins/test-data/3a0134bb-b7c4-7397-228f-a12bf096b197/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/18643e7fb06a411c936d4b4526bc705d, entries=1, sequenceid=29, filesize=4.9 K 2024-11-24T18:56:53,634 INFO [M:0;f2b92657890a:37509 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 123ms, sequenceid=29, compaction requested=false 2024-11-24T18:56:53,635 INFO [M:0;f2b92657890a:37509 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T18:56:53,635 DEBUG [M:0;f2b92657890a:37509 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732474613510Disabling compacts and flushes for region at 1732474613510Disabling writes for close at 1732474613510Obtaining lock to block concurrent updates at 1732474613510Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732474613510Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732474613511 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732474613512 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732474613512Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732474613525 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732474613525Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732474613534 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732474613552 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732474613552Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732474613561 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732474613576 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732474613576Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732474613586 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732474613600 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732474613600Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@cbd96b7: reopening flushed file at 1732474613610 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7e73e3bd: reopening flushed file at 1732474613616 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@76490eaa: reopening flushed file at 1732474613621 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@33edb39f: reopening flushed file at 1732474613627 (+6 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 123ms, sequenceid=29, compaction requested=false at 1732474613634 (+7 ms)Writing region close event to WAL at 1732474613635 (+1 ms)Closed at 1732474613635 2024-11-24T18:56:53,635 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:53,635 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:53,636 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:53,636 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-11-24T18:56:53,636 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T18:56:53,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741830_1006 (size=10311) 2024-11-24T18:56:53,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34015 is added to blk_1073741830_1006 (size=10311) 2024-11-24T18:56:53,638 INFO [M:0;f2b92657890a:37509 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-24T18:56:53,638 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T18:56:53,638 INFO [M:0;f2b92657890a:37509 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37509 2024-11-24T18:56:53,638 INFO [M:0;f2b92657890a:37509 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T18:56:53,751 INFO [M:0;f2b92657890a:37509 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T18:56:53,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T18:56:53,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37509-0x1016e34a4370000, quorum=127.0.0.1:59046, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T18:56:53,753 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6bfe0bbd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:56:53,753 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2f95849a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T18:56:53,753 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T18:56:53,754 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59622fd9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T18:56:53,754 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@44402286{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/hadoop.log.dir/,STOPPED} 2024-11-24T18:56:53,755 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T18:56:53,755 WARN [BP-571796543-172.17.0.2-1732474608924 heartbeating to localhost/127.0.0.1:39241 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T18:56:53,755 WARN [BP-571796543-172.17.0.2-1732474608924 heartbeating to localhost/127.0.0.1:39241 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-571796543-172.17.0.2-1732474608924 (Datanode Uuid 8c1004f4-e3d1-4952-a8d7-d0dd258e89b0) service to localhost/127.0.0.1:39241 2024-11-24T18:56:53,755 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T18:56:53,756 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/cluster_182f2636-85e1-4f57-e859-03b49e3286cf/data/data3/current/BP-571796543-172.17.0.2-1732474608924 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:56:53,756 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/cluster_182f2636-85e1-4f57-e859-03b49e3286cf/data/data4/current/BP-571796543-172.17.0.2-1732474608924 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T18:56:53,756 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T18:56:53,758 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1040cecb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T18:56:53,759 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1f1185ee{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T18:56:53,759 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T18:56:53,759 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e2fef96{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T18:56:53,759 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@b0e389f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/hadoop.log.dir/,STOPPED} 2024-11-24T18:56:53,761 WARN [BP-571796543-172.17.0.2-1732474608924 heartbeating to localhost/127.0.0.1:39241 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T18:56:53,761 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T18:56:53,761 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-24T18:56:53,761 WARN [BP-571796543-172.17.0.2-1732474608924 heartbeating to localhost/127.0.0.1:39241 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-571796543-172.17.0.2-1732474608924 (Datanode Uuid 53a84466-1404-4fa0-b64a-99bde38985b7) service to localhost/127.0.0.1:39241
2024-11-24T18:56:53,762 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/cluster_182f2636-85e1-4f57-e859-03b49e3286cf/data/data1/current/BP-571796543-172.17.0.2-1732474608924 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-24T18:56:53,762 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/cluster_182f2636-85e1-4f57-e859-03b49e3286cf/data/data2/current/BP-571796543-172.17.0.2-1732474608924 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-24T18:56:53,763 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-24T18:56:53,769 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@380ffe40{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-24T18:56:53,769 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3910812a{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-24T18:56:53,769 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-24T18:56:53,769 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@717a950c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-24T18:56:53,769 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d148abe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9d6bc15-c0b5-ed0e-cfcf-999a38d32d67/hadoop.log.dir/,STOPPED}
2024-11-24T18:56:53,777 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-24T18:56:53,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-24T18:56:53,802 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=268 (was 228)
Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39241 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39241
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:39241
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39241
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39241
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39241 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: HMaster-EventLoopGroup-16-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39241 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:39241
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
 - Thread LEAK? -, OpenFileDescriptor=530 (was 503) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=199 (was 190) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=8005 (was 8016)